Merge "msm: cvp: add additional check to avoid out of bound access"
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index e6f0191..75bc0a8 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -2130,6 +2130,7 @@ const struct dma_map_ops iommu_coherent_ops = {
 };
 
 /**
+ * DEPRECATED
  * arm_iommu_create_mapping
  * @bus: pointer to the bus holding the client device (for IOMMU calls)
  * @base: start address of the valid IO address space
@@ -2232,6 +2233,9 @@ static int extend_iommu_mapping(struct dma_iommu_mapping *mapping)
 	return 0;
 }
 
+/**
+ * DEPRECATED
+ */
 void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping)
 {
 	if (mapping)
@@ -2256,6 +2260,7 @@ static int __arm_iommu_attach_device(struct device *dev,
 }
 
 /**
+ * DEPRECATED
  * arm_iommu_attach_device
  * @dev: valid struct device pointer
  * @mapping: io address space mapping structure (returned from
@@ -2283,6 +2288,7 @@ int arm_iommu_attach_device(struct device *dev,
 EXPORT_SYMBOL_GPL(arm_iommu_attach_device);
 
 /**
+ * DEPRECATED
  * arm_iommu_detach_device
  * @dev: valid struct device pointer
  *
@@ -2313,42 +2319,170 @@ static const struct dma_map_ops *arm_get_iommu_dma_map_ops(bool coherent)
 	return coherent ? &iommu_coherent_ops : &iommu_ops;
 }
 
+/*
+ * Final kref release callback for a mapping created by
+ * arm_iommu_dma_init_mapping(): frees every per-extension bitmap,
+ * the bitmap pointer array, and the mapping structure itself.
+ * Called only via kref_put() once the last reference is dropped.
+ */
+static void arm_iommu_dma_release_mapping(struct kref *kref)
+{
+	int i;
+	struct dma_iommu_mapping *mapping =
+			container_of(kref, struct dma_iommu_mapping, kref);
+
+	for (i = 0; i < mapping->nr_bitmaps; i++)
+		kfree(mapping->bitmaps[i]);
+	kfree(mapping->bitmaps);
+	kfree(mapping);
+}
+
+/*
+ * Allocate and initialise a dma_iommu_mapping covering the IOVA range
+ * [base, base + size).  Unlike arm_iommu_create_mapping() this does not
+ * allocate or attach an iommu_domain; the caller is expected to set
+ * mapping->domain itself (see arm_setup_iommu_dma_ops()).
+ *
+ * Returns the mapping with its kref initialised to 1, or an ERR_PTR:
+ * -ERANGE if size exceeds the 32-bit DMA address space, -EINVAL for a
+ * zero-sized window, -ENOMEM on allocation failure.
+ */
+struct dma_iommu_mapping *
+arm_iommu_dma_init_mapping(dma_addr_t base, u64 size)
+{
+	unsigned int bits = size >> PAGE_SHIFT;
+	unsigned int bitmap_size = BITS_TO_LONGS(bits) * sizeof(long);
+	struct dma_iommu_mapping *mapping;
+	int extensions = 1;
+	int err = -ENOMEM;
+
+	/* currently only 32-bit DMA address space is supported */
+	if (size > DMA_BIT_MASK(32) + 1)
+		return ERR_PTR(-ERANGE);
+
+	if (!bitmap_size)
+		return ERR_PTR(-EINVAL);
+
+	/*
+	 * Cap each allocation bitmap at one page; larger windows are
+	 * covered by allocating further PAGE_SIZE bitmaps on demand
+	 * (up to "extensions" of them) in extend_iommu_mapping().
+	 */
+	if (bitmap_size > PAGE_SIZE) {
+		extensions = bitmap_size / PAGE_SIZE;
+		bitmap_size = PAGE_SIZE;
+	}
+
+	mapping = kzalloc(sizeof(struct dma_iommu_mapping), GFP_KERNEL);
+	if (!mapping)
+		goto err;
+
+	mapping->bitmap_size = bitmap_size;
+	mapping->bitmaps = kcalloc(extensions, sizeof(unsigned long *),
+				   GFP_KERNEL);
+	if (!mapping->bitmaps)
+		goto err2;
+
+	/* only the first bitmap is allocated up front */
+	mapping->bitmaps[0] = kzalloc(bitmap_size, GFP_KERNEL);
+	if (!mapping->bitmaps[0])
+		goto err3;
+
+	mapping->nr_bitmaps = 1;
+	mapping->extensions = extensions;
+	mapping->base = base;
+	mapping->bits = BITS_PER_BYTE * bitmap_size;
+
+	spin_lock_init(&mapping->lock);
+
+	kref_init(&mapping->kref);
+	return mapping;
+err3:
+	kfree(mapping->bitmaps);
+err2:
+	kfree(mapping);
+err:
+	return ERR_PTR(err);
+}
+
+/*
+ * Checks for "qcom,iommu-dma-addr-pool" property on the device's
+ * "qcom,iommu-group" node (falling back to the device node itself).
+ * If not present, leaves dma_addr and dma_size unmodified.
+ */
+static void arm_iommu_get_dma_window(struct device *dev, u64 *dma_addr,
+					u64 *dma_size)
+{
+	struct device_node *np;
+	int naddr, nsize, len;
+	const __be32 *ranges;
+
+	if (!dev->of_node)
+		return;
+
+	/*
+	 * of_parse_phandle() returns a node with an elevated refcount;
+	 * take a reference on the fallback node too so a single
+	 * of_node_put() at the end balances both paths (the original
+	 * code leaked the phandle reference).
+	 */
+	np = of_parse_phandle(dev->of_node, "qcom,iommu-group", 0);
+	if (!np)
+		np = of_node_get(dev->of_node);
+
+	ranges = of_get_property(np, "qcom,iommu-dma-addr-pool", &len);
+	if (!ranges)
+		goto out;
+
+	len /= sizeof(u32);
+	naddr = of_n_addr_cells(np);
+	nsize = of_n_size_cells(np);
+	if (len < naddr + nsize) {
+		dev_err(dev, "Invalid length for qcom,iommu-dma-addr-pool, expected %d cells\n",
+			naddr + nsize);
+		goto out;
+	}
+	if (naddr == 0 || nsize == 0) {
+		dev_err(dev, "Invalid #address-cells %d or #size-cells %d\n",
+			naddr, nsize);
+		goto out;
+	}
+
+	*dma_addr = of_read_number(ranges, naddr);
+	*dma_size = of_read_number(ranges + naddr, nsize);
+out:
+	of_node_put(np);
+}
+
+/*
+ * Wire up IOMMU DMA state for @dev by reusing the iommu_domain that the
+ * IOMMU core has already attached, instead of creating and attaching a
+ * private mapping as the old arm_iommu_create_mapping() path did.  The
+ * DMA window may be overridden by the "qcom,iommu-dma-addr-pool" DT
+ * property.  Returns true when to_dma_iommu_mapping(dev) is populated
+ * and IOMMU dma_map_ops should be installed.
+ */
 static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
 				    const struct iommu_ops *iommu)
 {
+	struct iommu_group *group;
+	struct iommu_domain *domain;
 	struct dma_iommu_mapping *mapping;
 
 	if (!iommu)
 		return false;
 
-	mapping = arm_iommu_create_mapping(dev->bus, dma_base, size);
+	group = dev->iommu_group;
+	if (!group)
+		return false;
+
+	domain = iommu_get_domain_for_dev(dev);
+	if (!domain)
+		return false;
+
+	arm_iommu_get_dma_window(dev, &dma_base, &size);
+
+	/* Allow iommu-debug to call arch_setup_dma_ops to reconfigure itself */
+	if (domain->type != IOMMU_DOMAIN_DMA &&
+	    !of_device_is_compatible(dev->of_node, "iommu-debug-test")) {
+		dev_err(dev, "Invalid iommu domain type!\n");
+		return false;
+	}
+
+	mapping = arm_iommu_dma_init_mapping(dma_base, size);
 	if (IS_ERR(mapping)) {
-		pr_warn("Failed to create %llu-byte IOMMU mapping for device %s\n",
+		pr_warn("Failed to initialize %llu-byte IOMMU mapping for device %s\n",
 				size, dev_name(dev));
 		return false;
 	}
 
-	if (__arm_iommu_attach_device(dev, mapping)) {
-		pr_warn("Failed to attached device %s to IOMMU_mapping\n",
-				dev_name(dev));
-		arm_iommu_release_mapping(mapping);
-		return false;
-	}
+	mapping->domain = domain;
+	/*
+	 * NOTE(review): arm_iommu_dma_init_mapping() returns with the kref
+	 * at 1 and this kref_get() raises it to 2, but
+	 * arm_teardown_iommu_dma_ops() drops only one reference.
+	 * Presumably the second reference is dropped elsewhere (e.g. by
+	 * the IOMMU driver) -- confirm, otherwise the mapping leaks on
+	 * teardown.
+	 */
+	kref_get(&mapping->kref);
+	to_dma_iommu_mapping(dev) = mapping;
 
 	return true;
 }
 
 static void arm_teardown_iommu_dma_ops(struct device *dev)
 {
-	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
+	struct dma_iommu_mapping *mapping;
+	int s1_bypass = 0;
 
+	mapping = to_dma_iommu_mapping(dev);
 	if (!mapping)
 		return;
 
-	arm_iommu_detach_device(dev);
-	arm_iommu_release_mapping(mapping);
-}
+	/*
+	 * Query the S1 bypass attribute *before* dropping our reference:
+	 * kref_put() may free @mapping via arm_iommu_dma_release_mapping(),
+	 * and dereferencing mapping->domain afterwards would be a
+	 * use-after-free.
+	 */
+	iommu_domain_get_attr(mapping->domain, DOMAIN_ATTR_S1_BYPASS,
+			&s1_bypass);
+
+	kref_put(&mapping->kref, arm_iommu_dma_release_mapping);
+	to_dma_iommu_mapping(dev) = NULL;
 
+	/* Let arch_setup_dma_ops() start again from scratch upon re-probe */
+	if (!s1_bypass)
+		set_dma_ops(dev, NULL);
+}
 #else
 
 static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
@@ -2367,6 +2501,8 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 			const struct iommu_ops *iommu, bool coherent)
 {
 	const struct dma_map_ops *dma_ops;
+	struct dma_iommu_mapping *mapping;
+	int s1_bypass = 0;
 
 	dev->archdata.dma_coherent = coherent;
 
@@ -2378,9 +2514,16 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 	if (dev->dma_ops)
 		return;
 
-	if (arm_setup_iommu_dma_ops(dev, dma_base, size, iommu))
-		dma_ops = arm_get_iommu_dma_map_ops(coherent);
-	else
+	if (arm_setup_iommu_dma_ops(dev, dma_base, size, iommu)) {
+		mapping = to_dma_iommu_mapping(dev);
+		if (mapping)
+			iommu_domain_get_attr(mapping->domain,
+				DOMAIN_ATTR_S1_BYPASS, &s1_bypass);
+		if (s1_bypass)
+			dma_ops = arm_get_dma_map_ops(coherent);
+		else
+			dma_ops = arm_get_iommu_dma_map_ops(coherent);
+	} else
 		dma_ops = arm_get_dma_map_ops(coherent);
 
 	set_dma_ops(dev, dma_ops);
@@ -2400,6 +2543,4 @@ void arch_teardown_dma_ops(struct device *dev)
 		return;
 
 	arm_teardown_iommu_dma_ops(dev);
-	/* Let arch_setup_dma_ops() start again from scratch upon re-probe */
-	set_dma_ops(dev, NULL);
 }
diff --git a/arch/arm64/configs/vendor/kona-perf_defconfig b/arch/arm64/configs/vendor/kona-perf_defconfig
index 9c1f97a..baced85 100644
--- a/arch/arm64/configs/vendor/kona-perf_defconfig
+++ b/arch/arm64/configs/vendor/kona-perf_defconfig
@@ -52,7 +52,6 @@
 CONFIG_PCI_MSM_MSI=y
 CONFIG_SCHED_MC=y
 CONFIG_NR_CPUS=8
-CONFIG_HZ_100=y
 CONFIG_SECCOMP=y
 CONFIG_OKL4_GUEST=y
 # CONFIG_UNMAP_KERNEL_AT_EL0 is not set
diff --git a/arch/arm64/configs/vendor/kona_defconfig b/arch/arm64/configs/vendor/kona_defconfig
index 59205612..00f57a7 100644
--- a/arch/arm64/configs/vendor/kona_defconfig
+++ b/arch/arm64/configs/vendor/kona_defconfig
@@ -52,7 +52,6 @@
 CONFIG_PCI_MSM_MSI=y
 CONFIG_SCHED_MC=y
 CONFIG_NR_CPUS=8
-CONFIG_HZ_100=y
 CONFIG_SECCOMP=y
 CONFIG_OKL4_GUEST=y
 # CONFIG_UNMAP_KERNEL_AT_EL0 is not set
diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c
index 4996e75..2786c4e6 100644
--- a/arch/arm64/kernel/topology.c
+++ b/arch/arm64/kernel/topology.c
@@ -340,7 +340,6 @@ static void __init reset_cpu_topology(void)
 		cpu_topo->llc_id = -1;
 
 		clear_cpu_topology(cpu);
-		cpumask_set_cpu(cpu, &cpu_topo->core_possible_sibling);
 	}
 }
 
diff --git a/drivers/bus/mhi/controllers/mhi_arch_qcom.c b/drivers/bus/mhi/controllers/mhi_arch_qcom.c
index 42df20f..a41573c 100644
--- a/drivers/bus/mhi/controllers/mhi_arch_qcom.c
+++ b/drivers/bus/mhi/controllers/mhi_arch_qcom.c
@@ -349,11 +349,8 @@ static  int mhi_arch_pcie_scale_bw(struct mhi_controller *mhi_cntrl,
 {
 	int ret;
 
-	mhi_cntrl->lpm_disable(mhi_cntrl, mhi_cntrl->priv_data);
 	ret = msm_pcie_set_link_bandwidth(pci_dev, link_info->target_link_speed,
 					  link_info->target_link_width);
-	mhi_cntrl->lpm_enable(mhi_cntrl, mhi_cntrl->priv_data);
-
 	if (ret)
 		return ret;
 
diff --git a/drivers/bus/mhi/core/mhi_main.c b/drivers/bus/mhi/core/mhi_main.c
index 11a6516..414fd85 100644
--- a/drivers/bus/mhi/core/mhi_main.c
+++ b/drivers/bus/mhi/core/mhi_main.c
@@ -1424,6 +1424,9 @@ int mhi_process_bw_scale_ev_ring(struct mhi_controller *mhi_cntrl,
 
 	if (ev_ring->rp == dev_rp) {
 		spin_unlock_bh(&mhi_event->lock);
+		read_lock_bh(&mhi_cntrl->pm_lock);
+		mhi_cntrl->wake_put(mhi_cntrl, false);
+		read_unlock_bh(&mhi_cntrl->pm_lock);
 		MHI_VERB("no pending event found\n");
 		goto exit_bw_process;
 	}
diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c
index 684666d..4a56171 100644
--- a/drivers/char/adsprpc.c
+++ b/drivers/char/adsprpc.c
@@ -315,8 +315,8 @@ struct fastrpc_static_pd {
 	struct notifier_block pdrnb;
 	struct notifier_block get_service_nb;
 	void *pdrhandle;
-	int pdrcount;
-	int prevpdrcount;
+	uint64_t pdrcount;
+	uint64_t prevpdrcount;
 	int ispdup;
 	int cid;
 };
@@ -338,10 +338,10 @@ struct fastrpc_channel_ctx {
 	struct notifier_block nb;
 	struct mutex smd_mutex;
 	struct mutex rpmsg_mutex;
-	int sesscount;
-	int ssrcount;
+	uint64_t sesscount;
+	uint64_t ssrcount;
 	void *handle;
-	int prevssrcount;
+	uint64_t prevssrcount;
 	int issubsystemup;
 	int vmid;
 	struct secure_vm rhvm;
@@ -442,7 +442,7 @@ struct fastrpc_file {
 	int sessionid;
 	int tgid;
 	int cid;
-	int ssrcount;
+	uint64_t ssrcount;
 	int pd;
 	char *servloc_name;
 	int file_close;
@@ -1307,7 +1307,7 @@ static int context_alloc(struct fastrpc_file *fl, uint32_t kernel,
 	struct smq_invoke_ctx *ctx = NULL;
 	struct fastrpc_ctx_lst *clst = &fl->clst;
 	struct fastrpc_ioctl_invoke *invoke = &invokefd->inv;
-	unsigned int cid;
+	int cid;
 
 	bufs = REMOTE_SCALARS_LENGTH(invoke->sc);
 	size = bufs * sizeof(*ctx->lpra) + bufs * sizeof(*ctx->maps) +
@@ -3285,7 +3285,8 @@ static int fastrpc_session_alloc_locked(struct fastrpc_channel_ctx *chan,
 			int secure, struct fastrpc_session_ctx **session)
 {
 	struct fastrpc_apps *me = &gfa;
-	int idx = 0, err = 0;
+	uint64_t idx = 0;
+	int err = 0;
 
 	if (chan->sesscount) {
 		for (idx = 0; idx < chan->sesscount; ++idx) {
@@ -3603,13 +3604,13 @@ static ssize_t fastrpc_debugfs_read(struct file *filp, char __user *buffer,
 			len += scnprintf(fileinfo + len,
 				DEBUGFS_SIZE - len, "%-7s", chan->subsys);
 			len += scnprintf(fileinfo + len,
-				DEBUGFS_SIZE - len, "|%-10d",
+				DEBUGFS_SIZE - len, "|%-10u",
 				chan->sesscount);
 			len += scnprintf(fileinfo + len,
 				DEBUGFS_SIZE - len, "|%-14d",
 				chan->issubsystemup);
 			len += scnprintf(fileinfo + len,
-				DEBUGFS_SIZE - len, "|%-9d",
+				DEBUGFS_SIZE - len, "|%-9u",
 				chan->ssrcount);
 			for (j = 0; j < chan->sesscount; j++) {
 				sess_used += chan->session[j].used;
@@ -3665,7 +3666,7 @@ static ssize_t fastrpc_debugfs_read(struct file *filp, char __user *buffer,
 		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
 			"%s %7s %d\n", "sessionid", ":", fl->sessionid);
 		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
-			"%s %8s %d\n", "ssrcount", ":", fl->ssrcount);
+			"%s %8s %u\n", "ssrcount", ":", fl->ssrcount);
 		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
 			"%s %14s %d\n", "pd", ":", fl->pd);
 		len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
@@ -4482,8 +4483,8 @@ static int fastrpc_cb_probe(struct device *dev)
 	struct fastrpc_session_ctx *sess;
 	struct of_phandle_args iommuspec;
 	const char *name;
-	int err = 0;
-	unsigned int sharedcb_count = 0, cid, i, j;
+	int err = 0, cid = -1, i = 0;
+	u32 sharedcb_count = 0, j = 0;
 
 	VERIFY(err, NULL != (name = of_get_property(dev->of_node,
 					 "label", NULL)));
diff --git a/drivers/char/diag/diagfwd_socket.c b/drivers/char/diag/diagfwd_socket.c
index 23e8e3d..4937d0b 100644
--- a/drivers/char/diag/diagfwd_socket.c
+++ b/drivers/char/diag/diagfwd_socket.c
@@ -597,12 +597,14 @@ static void socket_read_work_fn(struct work_struct *work)
 						     struct diag_socket_info,
 						     read_work);
 
-	if (!info)
+	if (!info) {
+		diag_ws_release();
 		return;
-
+	}
 	mutex_lock(&info->socket_info_mutex);
 	if (!info->hdl || !info->hdl->sk) {
 		mutex_unlock(&info->socket_info_mutex);
+		diag_ws_release();
 		return;
 	}
 	err = sock_error(info->hdl->sk);
@@ -611,6 +613,7 @@ static void socket_read_work_fn(struct work_struct *work)
 		socket_close_channel(info);
 		if (info->port_type == PORT_TYPE_SERVER)
 			socket_init_work_fn(&info->init_work);
+		diag_ws_release();
 		return;
 	}
 
diff --git a/drivers/clk/qcom/debugcc-lito.c b/drivers/clk/qcom/debugcc-lito.c
index 09265c9..2502d20 100644
--- a/drivers/clk/qcom/debugcc-lito.c
+++ b/drivers/clk/qcom/debugcc-lito.c
@@ -280,8 +280,6 @@ static const char *const gcc_debug_mux_parent_names[] = {
 	"gcc_camera_ahb_clk",
 	"gcc_camera_hf_axi_clk",
 	"gcc_camera_sf_axi_clk",
-	"gcc_camera_throttle_hf_axi_clk",
-	"gcc_camera_throttle_sf_axi_clk",
 	"gcc_camera_xo_clk",
 	"gcc_cfg_noc_usb3_prim_axi_clk",
 	"gcc_cpuss_ahb_clk",
@@ -292,8 +290,6 @@ static const char *const gcc_debug_mux_parent_names[] = {
 	"gcc_disp_gpll0_clk_src",
 	"gcc_disp_hf_axi_clk",
 	"gcc_disp_sf_axi_clk",
-	"gcc_disp_throttle_hf_axi_clk",
-	"gcc_disp_throttle_sf_axi_clk",
 	"gcc_disp_xo_clk",
 	"gcc_gp1_clk",
 	"gcc_gp2_clk",
@@ -365,8 +361,6 @@ static const char *const gcc_debug_mux_parent_names[] = {
 	"gcc_usb3_prim_phy_pipe_clk",
 	"gcc_video_ahb_clk",
 	"gcc_video_axi_clk",
-	"gcc_video_throttle1_axi_clk",
-	"gcc_video_throttle_axi_clk",
 	"gcc_video_xo_clk",
 	"gpu_cc_debug_mux",
 	"mc_cc_debug_mux",
@@ -386,8 +380,6 @@ static int gcc_debug_mux_sels[] = {
 	0x3F,		/* gcc_camera_ahb_clk */
 	0x47,		/* gcc_camera_hf_axi_clk */
 	0x48,		/* gcc_camera_sf_axi_clk */
-	0x57,		/* gcc_camera_throttle_hf_axi_clk */
-	0x58,		/* gcc_camera_throttle_sf_axi_clk */
 	0x4C,		/* gcc_camera_xo_clk */
 	0x1C,		/* gcc_cfg_noc_usb3_prim_axi_clk */
 	0xD5,		/* gcc_cpuss_ahb_clk */
@@ -398,8 +390,6 @@ static int gcc_debug_mux_sels[] = {
 	0x5B,		/* gcc_disp_gpll0_clk_src */
 	0x49,		/* gcc_disp_hf_axi_clk */
 	0x4A,		/* gcc_disp_sf_axi_clk */
-	0x55,		/* gcc_disp_throttle_hf_axi_clk */
-	0x56,		/* gcc_disp_throttle_sf_axi_clk */
 	0x4D,		/* gcc_disp_xo_clk */
 	0xE4,		/* gcc_gp1_clk */
 	0xE5,		/* gcc_gp2_clk */
@@ -471,8 +461,6 @@ static int gcc_debug_mux_sels[] = {
 	0x75,		/* gcc_usb3_prim_phy_pipe_clk */
 	0x3E,		/* gcc_video_ahb_clk */
 	0x5A,		/* gcc_video_axi_clk */
-	0x5C,		/* gcc_video_throttle1_axi_clk */
-	0x46,		/* gcc_video_throttle_axi_clk */
 	0x4B,		/* gcc_video_xo_clk */
 	0x129,		/* gpu_cc_debug_mux */
 	0xC5,		/* mc_cc_debug_mux */
@@ -513,7 +501,6 @@ static const char *const gpu_cc_debug_mux_parent_names[] = {
 	"gpu_cc_cxo_clk",
 	"gpu_cc_gx_gmu_clk",
 	"gpu_cc_gx_vsense_clk",
-	"gpu_cc_rbcpr_clk",
 	"gpu_cc_sleep_clk",
 	"measure_only_gpu_cc_cx_gfx3d_clk",
 	"measure_only_gpu_cc_cx_gfx3d_slv_clk",
@@ -530,7 +517,6 @@ static int gpu_cc_debug_mux_sels[] = {
 	0x19,		/* gpu_cc_cxo_clk */
 	0xF,		/* gpu_cc_gx_gmu_clk */
 	0xC,		/* gpu_cc_gx_vsense_clk */
-	0x1C,		/* gpu_cc_rbcpr_clk */
 	0x16,		/* gpu_cc_sleep_clk */
 	0x1A,		/* measure_only_gpu_cc_cx_gfx3d_clk */
 	0x1B,		/* measure_only_gpu_cc_cx_gfx3d_slv_clk */
@@ -558,8 +544,6 @@ static struct clk_debug_mux gpu_cc_debug_mux = {
 };
 
 static const char *const npu_cc_debug_mux_parent_names[] = {
-	"npu_cc_aon_clk",
-	"npu_cc_atb_clk",
 	"npu_cc_bto_core_clk",
 	"npu_cc_bwmon_clk",
 	"npu_cc_cal_hm0_cdc_clk",
@@ -591,8 +575,6 @@ static const char *const npu_cc_debug_mux_parent_names[] = {
 };
 
 static int npu_cc_debug_mux_sels[] = {
-	0x5,		/* npu_cc_aon_clk */
-	0x17,		/* npu_cc_atb_clk */
 	0x19,		/* npu_cc_bto_core_clk */
 	0x18,		/* npu_cc_bwmon_clk */
 	0xB,		/* npu_cc_cal_hm0_cdc_clk */
@@ -645,7 +627,6 @@ static struct clk_debug_mux npu_cc_debug_mux = {
 
 static const char *const video_cc_debug_mux_parent_names[] = {
 	"video_cc_apb_clk",
-	"video_cc_at_clk",
 	"video_cc_mvs0_axi_clk",
 	"video_cc_mvs0_core_clk",
 	"video_cc_mvs1_axi_clk",
@@ -659,7 +640,6 @@ static const char *const video_cc_debug_mux_parent_names[] = {
 
 static int video_cc_debug_mux_sels[] = {
 	0xD,		/* video_cc_apb_clk */
-	0x10,		/* video_cc_at_clk */
 	0xA,		/* video_cc_mvs0_axi_clk */
 	0x3,		/* video_cc_mvs0_core_clk */
 	0xB,		/* video_cc_mvs1_axi_clk */
diff --git a/drivers/dma/qcom/gpi.c b/drivers/dma/qcom/gpi.c
index c23b191..0637793 100644
--- a/drivers/dma/qcom/gpi.c
+++ b/drivers/dma/qcom/gpi.c
@@ -447,7 +447,8 @@ struct gpi_dev {
 	struct dentry *dentry;
 };
 
-static struct gpi_dev *gpi_dev_dbg;
+static struct gpi_dev *gpi_dev_dbg[5];
+static int arr_idx;
 
 struct reg_info {
 	char *name;
@@ -581,6 +582,7 @@ struct gpii {
 	struct gpi_reg_table dbg_reg_table;
 	bool reg_table_dump;
 	u32 dbg_gpi_irq_cnt;
+	bool ieob_set;
 };
 
 struct gpi_desc {
@@ -1496,20 +1498,6 @@ static void gpi_process_imed_data_event(struct gpii_chan *gpii_chan,
 		return;
 	}
 	gpi_desc = to_gpi_desc(vd);
-
-	/* Event TR RP gen. don't match descriptor TR */
-	if (gpi_desc->wp != tre) {
-		spin_unlock_irqrestore(&gpii_chan->vc.lock, flags);
-		GPII_ERR(gpii, gpii_chan->chid,
-			 "EOT/EOB received for wrong TRE 0x%0llx != 0x%0llx\n",
-			 to_physical(ch_ring, gpi_desc->wp),
-			 to_physical(ch_ring, tre));
-		gpi_generate_cb_event(gpii_chan, MSM_GPI_QUP_EOT_DESC_MISMATCH,
-				      __LINE__);
-		return;
-	}
-
-	list_del(&vd->node);
 	spin_unlock_irqrestore(&gpii_chan->vc.lock, flags);
 
 
@@ -1525,6 +1513,9 @@ static void gpi_process_imed_data_event(struct gpii_chan *gpii_chan,
 	/* make sure rp updates are immediately visible to all cores */
 	smp_wmb();
 
+	if (imed_event->code == MSM_GPI_TCE_EOT && gpii->ieob_set)
+		return;
+
 	tx_cb_param = vd->tx.callback_param;
 	if (vd->tx.callback && tx_cb_param) {
 		struct msm_gpi_tre *imed_tre = &tx_cb_param->imed_tre;
@@ -1540,7 +1531,12 @@ static void gpi_process_imed_data_event(struct gpii_chan *gpii_chan,
 		tx_cb_param->status = imed_event->status;
 		vd->tx.callback(tx_cb_param);
 	}
+
+	spin_lock_irqsave(&gpii_chan->vc.lock, flags);
+	list_del(&vd->node);
+	spin_unlock_irqrestore(&gpii_chan->vc.lock, flags);
 	kfree(gpi_desc);
+	gpi_desc = NULL;
 }
 
 /* processing transfer completion events */
@@ -1583,20 +1579,6 @@ static void gpi_process_xfer_compl_event(struct gpii_chan *gpii_chan,
 	}
 
 	gpi_desc = to_gpi_desc(vd);
-
-	/* TRE Event generated didn't match descriptor's TRE */
-	if (gpi_desc->wp != ev_rp) {
-		spin_unlock_irqrestore(&gpii_chan->vc.lock, flags);
-		GPII_ERR(gpii, gpii_chan->chid,
-			 "EOT\EOB received for wrong TRE 0x%0llx != 0x%0llx\n",
-			 to_physical(ch_ring, gpi_desc->wp),
-			 to_physical(ch_ring, ev_rp));
-		gpi_generate_cb_event(gpii_chan, MSM_GPI_QUP_EOT_DESC_MISMATCH,
-				      __LINE__);
-		return;
-	}
-
-	list_del(&vd->node);
 	spin_unlock_irqrestore(&gpii_chan->vc.lock, flags);
 
 
@@ -1612,6 +1594,9 @@ static void gpi_process_xfer_compl_event(struct gpii_chan *gpii_chan,
 	/* update must be visible to other cores */
 	smp_wmb();
 
+	if (compl_event->code == MSM_GPI_TCE_EOT && gpii->ieob_set)
+		return;
+
 	tx_cb_param = vd->tx.callback_param;
 	if (vd->tx.callback && tx_cb_param) {
 		GPII_VERB(gpii, gpii_chan->chid,
@@ -1623,7 +1608,13 @@ static void gpi_process_xfer_compl_event(struct gpii_chan *gpii_chan,
 		tx_cb_param->status = compl_event->status;
 		vd->tx.callback(tx_cb_param);
 	}
+
+	spin_lock_irqsave(&gpii_chan->vc.lock, flags);
+	list_del(&vd->node);
+	spin_unlock_irqrestore(&gpii_chan->vc.lock, flags);
 	kfree(gpi_desc);
+	gpi_desc = NULL;
+
 }
 
 /* process all events */
@@ -1843,12 +1834,12 @@ static int gpi_alloc_chan(struct gpii_chan *gpii_chan, bool send_alloc_cmd)
 		{
 			gpii_chan->ch_cntxt_base_reg,
 			CNTXT_3_RING_BASE_MSB,
-			(u32)(ring->phys_addr >> 32),
+			MSM_GPI_RING_PHYS_ADDR_UPPER(ring->phys_addr),
 		},
 		{ /* program MSB of DB register with ring base */
 			gpii_chan->ch_cntxt_db_reg,
 			CNTXT_5_RING_RP_MSB - CNTXT_4_RING_RP_LSB,
-			(u32)(ring->phys_addr >> 32),
+			MSM_GPI_RING_PHYS_ADDR_UPPER(ring->phys_addr),
 		},
 		{
 			gpii->regs,
@@ -1937,13 +1928,13 @@ static int gpi_alloc_ev_chan(struct gpii *gpii)
 		{
 			gpii->ev_cntxt_base_reg,
 			CNTXT_3_RING_BASE_MSB,
-			(u32)(ring->phys_addr >> 32),
+			MSM_GPI_RING_PHYS_ADDR_UPPER(ring->phys_addr),
 		},
 		{
 			/* program db msg with ring base msb */
 			gpii->ev_cntxt_db_reg,
 			CNTXT_5_RING_RP_MSB - CNTXT_4_RING_RP_LSB,
-			(u32)(ring->phys_addr >> 32),
+			MSM_GPI_RING_PHYS_ADDR_UPPER(ring->phys_addr),
 		},
 		{
 			gpii->ev_cntxt_base_reg,
@@ -2299,6 +2290,7 @@ void gpi_desc_free(struct virt_dma_desc *vd)
 	struct gpi_desc *gpi_desc = to_gpi_desc(vd);
 
 	kfree(gpi_desc);
+	gpi_desc = NULL;
 }
 
 /* copy tre into transfer ring */
@@ -2319,6 +2311,7 @@ struct dma_async_tx_descriptor *gpi_prep_slave_sg(struct dma_chan *chan,
 	void *tre, *wp = NULL;
 	const gfp_t gfp = GFP_ATOMIC;
 	struct gpi_desc *gpi_desc;
+	gpii->ieob_set = false;
 
 	GPII_VERB(gpii, gpii_chan->chid, "enter\n");
 
@@ -2352,10 +2345,22 @@ struct dma_async_tx_descriptor *gpi_prep_slave_sg(struct dma_chan *chan,
 	}
 
 	/* copy each tre into transfer ring */
-	for_each_sg(sgl, sg, sg_len, i)
-		for (j = 0, tre = sg_virt(sg); j < sg->length;
+	for_each_sg(sgl, sg, sg_len, i) {
+		tre = sg_virt(sg);
+
+		/* Check if last tre has ieob set */
+		if (i == sg_len - 1) {
+			if ((((struct msm_gpi_tre *)tre)->dword[3] &
+					GPI_IEOB_BMSK) >> GPI_IEOB_BMSK_SHIFT)
+				gpii->ieob_set = true;
+			else
+				gpii->ieob_set = false;
+		}
+
+		for (j = 0; j < sg->length;
 		     j += ch_ring->el_size, tre += ch_ring->el_size)
 			gpi_queue_xfer(gpii, gpii_chan, tre, &wp);
+	}
 
 	/* set up the descriptor */
 	gpi_desc->db = ch_ring->wp;
@@ -2807,7 +2812,8 @@ static int gpi_probe(struct platform_device *pdev)
 		return -ENOMEM;
 
 	/* debug purpose */
-	gpi_dev_dbg = gpi_dev;
+	gpi_dev_dbg[arr_idx] = gpi_dev;
+	arr_idx++;
 
 	gpi_dev->dev = &pdev->dev;
 	gpi_dev->klog_lvl = DEFAULT_KLOG_LVL;
diff --git a/drivers/dma/qcom/msm_gpi_mmio.h b/drivers/dma/qcom/msm_gpi_mmio.h
index 46ed27e..74a390b 100644
--- a/drivers/dma/qcom/msm_gpi_mmio.h
+++ b/drivers/dma/qcom/msm_gpi_mmio.h
@@ -228,3 +228,7 @@ enum CNTXT_OFFS {
 #define GPI_DEBUG_QSB_LOG_1 (0x5068)
 #define GPI_DEBUG_QSB_LOG_2 (0x506C)
 #define GPI_DEBUG_QSB_LOG_LAST_MISC_ID(n) (0x5070 + (0x4*n))
+
+/* IEOB bit set */
+#define GPI_IEOB_BMSK (0x100)
+#define GPI_IEOB_BMSK_SHIFT (8)
diff --git a/drivers/extcon/Kconfig b/drivers/extcon/Kconfig
index da9c986..de15bf5 100644
--- a/drivers/extcon/Kconfig
+++ b/drivers/extcon/Kconfig
@@ -43,15 +43,6 @@
 	  Say Y here to enable GPIO based extcon support. Note that GPIO
 	  extcon supports single state per extcon instance.
 
-config EXTCON_STORAGE_CD_GPIO
-	tristate "Storage card detect GPIO extcon support"
-	depends on GPIOLIB || COMPILE_TEST
-	help
-	  Say Y here to enable removable storage card detect GPIO based
-	  extcon support. It helps when different kinds of storage cards
-	  share one detect GPIO. Note that storage card detect GPIO extcon
-	  supports single state per extcon instance.
-
 config EXTCON_INTEL_INT3496
 	tristate "Intel INT3496 ACPI device extcon driver"
 	depends on GPIOLIB && ACPI && (X86 || COMPILE_TEST)
diff --git a/drivers/extcon/Makefile b/drivers/extcon/Makefile
index 3ecee74..0888fde 100644
--- a/drivers/extcon/Makefile
+++ b/drivers/extcon/Makefile
@@ -9,7 +9,6 @@
 obj-$(CONFIG_EXTCON_ARIZONA)	+= extcon-arizona.o
 obj-$(CONFIG_EXTCON_AXP288)	+= extcon-axp288.o
 obj-$(CONFIG_EXTCON_GPIO)	+= extcon-gpio.o
-obj-$(CONFIG_EXTCON_STORAGE_CD_GPIO)	+= extcon-storage-cd-gpio.o
 obj-$(CONFIG_EXTCON_INTEL_INT3496) += extcon-intel-int3496.o
 obj-$(CONFIG_EXTCON_INTEL_CHT_WC) += extcon-intel-cht-wc.o
 obj-$(CONFIG_EXTCON_MAX14577)	+= extcon-max14577.o
diff --git a/drivers/extcon/extcon-storage-cd-gpio.c b/drivers/extcon/extcon-storage-cd-gpio.c
deleted file mode 100644
index 189956d..0000000
--- a/drivers/extcon/extcon-storage-cd-gpio.c
+++ /dev/null
@@ -1,211 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2019, The Linux Foundation. All rights reserved.
- *
- */
-
-#include <linux/extcon-provider.h>
-#include <linux/gpio/consumer.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <linux/of_gpio.h>
-
-struct cd_gpio_extcon_data {
-	struct extcon_dev *edev;
-	int irq;
-	struct gpio_desc *gpiod;
-	unsigned int extcon_id;
-	unsigned long irq_flags;
-	struct pinctrl *pctrl;
-	struct pinctrl_state *pins_default;
-	unsigned int *supported_cable;
-};
-
-static irqreturn_t cd_gpio_threaded_irq_handler(int irq, void *dev_id)
-{
-	int state;
-	struct cd_gpio_extcon_data *data = dev_id;
-
-	state = gpiod_get_value_cansleep(data->gpiod);
-	extcon_set_state_sync(data->edev, data->extcon_id, state);
-
-	return IRQ_HANDLED;
-}
-
-static int extcon_parse_pinctrl_data(struct device *dev,
-				     struct cd_gpio_extcon_data *data)
-{
-	struct pinctrl *pctrl;
-	int ret = 0;
-
-	/* Try to obtain pinctrl handle */
-	pctrl = devm_pinctrl_get(dev);
-	if (IS_ERR(pctrl)) {
-		ret = PTR_ERR(pctrl);
-		goto out;
-	}
-	data->pctrl = pctrl;
-
-	/* Look-up and keep the state handy to be used later */
-	data->pins_default = pinctrl_lookup_state(data->pctrl, "default");
-	if (IS_ERR(data->pins_default)) {
-		ret = PTR_ERR(data->pins_default);
-		dev_err(dev, "Can't get default pinctrl state, ret %d\n", ret);
-	}
-out:
-	return ret;
-}
-
-static int extcon_populate_data(struct device *dev,
-				struct cd_gpio_extcon_data *data)
-{
-	struct device_node *np = dev->of_node;
-	u32 val;
-	int ret = 0;
-
-	ret = of_property_read_u32(np, "extcon-id", &data->extcon_id);
-	if (ret) {
-		dev_err(dev, "failed to read extcon-id property, %d\n", ret);
-		goto out;
-	}
-
-	ret = of_property_read_u32(np, "irq-flags", &val);
-	if (ret) {
-		dev_err(dev, "failed to read irq-flags property, %d\n", ret);
-		goto out;
-	}
-	data->irq_flags = val;
-
-	ret = extcon_parse_pinctrl_data(dev, data);
-	if (ret)
-		dev_err(dev, "failed to parse pinctrl data\n");
-
-out:
-	return ret;
-}
-
-static int cd_gpio_extcon_probe(struct platform_device *pdev)
-{
-	struct cd_gpio_extcon_data *data;
-	struct device *dev = &pdev->dev;
-	int state, ret;
-
-	data = devm_kzalloc(dev, sizeof(struct cd_gpio_extcon_data),
-			    GFP_KERNEL);
-	if (!data)
-		return -ENOMEM;
-
-	if (!data->irq_flags) {
-		/* try populating cd gpio extcon data from device tree */
-		ret = extcon_populate_data(dev, data);
-		if (ret)
-			return ret;
-	}
-	if (!data->irq_flags || data->extcon_id >= EXTCON_NUM)
-		return -EINVAL;
-
-	ret = pinctrl_select_state(data->pctrl, data->pins_default);
-	if (ret < 0)
-		dev_err(dev, "pinctrl state select failed, ret %d\n", ret);
-
-	data->gpiod = devm_gpiod_get(dev, "extcon", GPIOD_IN);
-	if (IS_ERR(data->gpiod))
-		return PTR_ERR(data->gpiod);
-
-	data->irq = gpiod_to_irq(data->gpiod);
-	if (data->irq <= 0)
-		return data->irq;
-
-	data->supported_cable = devm_kzalloc(dev,
-					     sizeof(*data->supported_cable) * 2,
-					     GFP_KERNEL);
-	if (!data->supported_cable)
-		return -ENOMEM;
-
-	data->supported_cable[0] = data->extcon_id;
-	data->supported_cable[1] = EXTCON_NONE;
-	/* Allocate the memory of extcon devie and register extcon device */
-	data->edev = devm_extcon_dev_allocate(dev, data->supported_cable);
-	if (IS_ERR(data->edev)) {
-		dev_err(dev, "failed to allocate extcon device\n");
-		return -ENOMEM;
-	}
-
-	ret = devm_extcon_dev_register(dev, data->edev);
-	if (ret < 0)
-		return ret;
-
-	ret = devm_request_threaded_irq(dev, data->irq, NULL,
-				  cd_gpio_threaded_irq_handler,
-				  data->irq_flags | IRQF_ONESHOT,
-				  pdev->name, data);
-	if (ret < 0)
-		return ret;
-
-	ret = enable_irq_wake(data->irq);
-	if (ret)
-		return ret;
-
-	platform_set_drvdata(pdev, data);
-
-	/* Update initial state */
-	state = gpiod_get_value_cansleep(data->gpiod);
-	extcon_set_state(data->edev, data->extcon_id, state);
-
-	return 0;
-}
-
-static int cd_gpio_extcon_remove(struct platform_device *pdev)
-{
-	return 0;
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int cd_gpio_extcon_resume(struct device *dev)
-{
-	struct cd_gpio_extcon_data *data;
-	int state, ret = 0;
-
-	data = dev_get_drvdata(dev);
-	state = gpiod_get_value_cansleep(data->gpiod);
-	ret = extcon_set_state_sync(data->edev, data->extcon_id, state);
-	if (ret)
-		dev_err(dev, "%s: Failed to set extcon gpio state\n",
-				__func__);
-
-	return ret;
-}
-
-static const struct dev_pm_ops cd_gpio_extcon_pm_ops = {
-	SET_LATE_SYSTEM_SLEEP_PM_OPS(NULL, cd_gpio_extcon_resume)
-};
-
-#define EXTCON_GPIO_PMOPS (&cd_gpio_extcon_pm_ops)
-
-#else
-#define EXTCON_GPIO_PMOPS NULL
-#endif
-
-static const struct of_device_id extcon_cd_gpio_of_match[] = {
-	{ .compatible = "extcon-storage-cd-gpio"},
-	{},
-};
-
-static struct platform_driver cd_gpio_extcon_driver = {
-	.probe		= cd_gpio_extcon_probe,
-	.remove		= cd_gpio_extcon_remove,
-	.driver		= {
-		.name	= "extcon-storage-cd-gpio",
-		.pm	= EXTCON_GPIO_PMOPS,
-		.of_match_table = of_match_ptr(extcon_cd_gpio_of_match),
-	},
-};
-
-module_platform_driver(cd_gpio_extcon_driver);
-
-MODULE_DESCRIPTION("Storage card detect GPIO based extcon driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index f946ef7..8835b5f 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -3519,7 +3519,6 @@ static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux)
 	aux->ddc.class = I2C_CLASS_DDC;
 	aux->ddc.owner = THIS_MODULE;
 	aux->ddc.dev.parent = aux->dev;
-	aux->ddc.dev.of_node = aux->dev->of_node;
 
 	strlcpy(aux->ddc.name, aux->name ? aux->name : dev_name(aux->dev),
 		sizeof(aux->ddc.name));
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index 0f9608e..f9bf55f 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -586,6 +586,9 @@ static irqreturn_t adreno_irq_handler(struct kgsl_device *device)
 	unsigned int status = 0, fence = 0, fence_retries = 0, tmp, int_bit;
 	unsigned int shadow_status = 0;
 	int i;
+	u64 ts, ts1, ts2;
+
+	ts = gmu_core_dev_read_ao_counter(device);
 
 	atomic_inc(&adreno_dev->pending_irq_refcnt);
 	/* Ensure this increment is done before the IRQ status is updated */
@@ -612,6 +615,8 @@ static irqreturn_t adreno_irq_handler(struct kgsl_device *device)
 				&fence);
 
 		while (fence != 0) {
+			ts1 =  gmu_core_dev_read_ao_counter(device);
+
 			/* Wait for small time before trying again */
 			udelay(1);
 			adreno_readreg(adreno_dev,
@@ -619,14 +624,17 @@ static irqreturn_t adreno_irq_handler(struct kgsl_device *device)
 					&fence);
 
 			if (fence_retries == FENCE_RETRY_MAX && fence != 0) {
+				ts2 =  gmu_core_dev_read_ao_counter(device);
+
 				adreno_readreg(adreno_dev,
 					ADRENO_REG_GMU_RBBM_INT_UNMASKED_STATUS,
 					&shadow_status);
 
 				dev_crit_ratelimited(device->dev,
-					"Status=0x%x Unmasked status=0x%x Mask=0x%x\n",
+					"Status=0x%x Unmasked status=0x%x Timestamps:%llx %llx %llx\n",
 					shadow_status & irq_params->mask,
-					shadow_status, irq_params->mask);
+					shadow_status, ts, ts1, ts2);
+
 				adreno_set_gpu_fault(adreno_dev,
 						ADRENO_GMU_FAULT);
 				adreno_dispatcher_schedule(KGSL_DEVICE
diff --git a/drivers/gpu/msm/adreno_a6xx.h b/drivers/gpu/msm/adreno_a6xx.h
index cb45de5..b35334c 100644
--- a/drivers/gpu/msm/adreno_a6xx.h
+++ b/drivers/gpu/msm/adreno_a6xx.h
@@ -244,31 +244,6 @@ static inline int timed_poll_check_rscc(struct kgsl_device *device,
 	return -ETIMEDOUT;
 }
 
-/*
- * read_AO_counter() - Returns the 64bit always on counter value
- *
- * @device: Pointer to KGSL device
- */
-static inline uint64_t read_AO_counter(struct kgsl_device *device)
-{
-	unsigned int l, h, h1;
-
-	gmu_core_regread(device, A6XX_GMU_CX_GMU_ALWAYS_ON_COUNTER_H, &h);
-	gmu_core_regread(device, A6XX_GMU_CX_GMU_ALWAYS_ON_COUNTER_L, &l);
-	gmu_core_regread(device, A6XX_GMU_CX_GMU_ALWAYS_ON_COUNTER_H, &h1);
-
-	/*
-	 * If there's no change in COUNTER_H we have no overflow so return,
-	 * otherwise read COUNTER_L again
-	 */
-
-	if (h == h1)
-		return (uint64_t) l | ((uint64_t) h << 32);
-
-	gmu_core_regread(device, A6XX_GMU_CX_GMU_ALWAYS_ON_COUNTER_L, &l);
-	return (uint64_t) l | ((uint64_t) h1 << 32);
-}
-
 /* Preemption functions */
 void a6xx_preemption_trigger(struct adreno_device *adreno_dev);
 void a6xx_preemption_schedule(struct adreno_device *adreno_dev);
@@ -297,4 +272,5 @@ void a6xx_crashdump_init(struct adreno_device *adreno_dev);
 int a6xx_gmu_sptprac_enable(struct adreno_device *adreno_dev);
 void a6xx_gmu_sptprac_disable(struct adreno_device *adreno_dev);
 bool a6xx_gmu_sptprac_is_on(struct adreno_device *adreno_dev);
+u64 a6xx_gmu_read_ao_counter(struct kgsl_device *device);
 #endif
diff --git a/drivers/gpu/msm/adreno_a6xx_gmu.c b/drivers/gpu/msm/adreno_a6xx_gmu.c
index b8a28fb..f535544 100644
--- a/drivers/gpu/msm/adreno_a6xx_gmu.c
+++ b/drivers/gpu/msm/adreno_a6xx_gmu.c
@@ -886,7 +886,7 @@ static int a6xx_gmu_wait_for_lowest_idle(struct kgsl_device *device)
 	unsigned long t;
 	uint64_t ts1, ts2, ts3;
 
-	ts1 = read_AO_counter(device);
+	ts1 = a6xx_gmu_read_ao_counter(device);
 
 	t = jiffies + msecs_to_jiffies(GMU_IDLE_TIMEOUT);
 	do {
@@ -901,7 +901,7 @@ static int a6xx_gmu_wait_for_lowest_idle(struct kgsl_device *device)
 		usleep_range(10, 100);
 	} while (!time_after(jiffies, t));
 
-	ts2 = read_AO_counter(device);
+	ts2 = a6xx_gmu_read_ao_counter(device);
 	/* Check one last time */
 
 	gmu_core_regread(device, A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE, &reg);
@@ -910,7 +910,7 @@ static int a6xx_gmu_wait_for_lowest_idle(struct kgsl_device *device)
 	if (idle_trandition_complete(gmu->idle_level, reg, reg1))
 		return 0;
 
-	ts3 = read_AO_counter(device);
+	ts3 = a6xx_gmu_read_ao_counter(device);
 
 	/* Collect abort data to help with debugging */
 	gmu_core_regread(device, A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS, &reg2);
@@ -954,14 +954,14 @@ static int a6xx_gmu_wait_for_idle(struct kgsl_device *device)
 	unsigned int status2;
 	uint64_t ts1;
 
-	ts1 = read_AO_counter(device);
+	ts1 = a6xx_gmu_read_ao_counter(device);
 	if (timed_poll_check(device, A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS,
 			0, GMU_START_TIMEOUT, CXGXCPUBUSYIGNAHB)) {
 		gmu_core_regread(device,
 				A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS2, &status2);
 		dev_err(&gmu->pdev->dev,
 				"GMU not idling: status2=0x%x %llx %llx\n",
-				status2, ts1, read_AO_counter(device));
+				status2, ts1, a6xx_gmu_read_ao_counter(device));
 		return -ETIMEDOUT;
 	}
 
@@ -1722,6 +1722,31 @@ static int a6xx_gmu_wait_for_active_transition(
 	return -ETIMEDOUT;
 }
 
+/*
+ * a6xx_gmu_read_ao_counter() - Returns the 64bit always on counter value
+ *
+ * @device: Pointer to KGSL device
+ */
+u64 a6xx_gmu_read_ao_counter(struct kgsl_device *device)
+{
+	unsigned int l, h, h1;
+
+	gmu_core_regread(device, A6XX_GMU_CX_GMU_ALWAYS_ON_COUNTER_H, &h);
+	gmu_core_regread(device, A6XX_GMU_CX_GMU_ALWAYS_ON_COUNTER_L, &l);
+	gmu_core_regread(device, A6XX_GMU_CX_GMU_ALWAYS_ON_COUNTER_H, &h1);
+
+	/*
+	 * If there's no change in COUNTER_H we have no overflow so return,
+	 * otherwise read COUNTER_L again
+	 */
+
+	if (h == h1)
+		return (uint64_t) l | ((uint64_t) h << 32);
+
+	gmu_core_regread(device, A6XX_GMU_CX_GMU_ALWAYS_ON_COUNTER_L, &l);
+	return (uint64_t) l | ((uint64_t) h1 << 32);
+}
+
 struct gmu_dev_ops adreno_a6xx_gmudev = {
 	.load_firmware = a6xx_gmu_load_firmware,
 	.oob_set = a6xx_gmu_oob_set,
@@ -1739,6 +1764,7 @@ struct gmu_dev_ops adreno_a6xx_gmudev = {
 	.snapshot = a6xx_gmu_snapshot,
 	.cooperative_reset = a6xx_gmu_cooperative_reset,
 	.wait_for_active_transition = a6xx_gmu_wait_for_active_transition,
+	.read_ao_counter = a6xx_gmu_read_ao_counter,
 	.gmu2host_intr_mask = HFI_IRQ_MASK,
 	.gmu_ao_intr_mask = GMU_AO_INT_MASK,
 };
diff --git a/drivers/gpu/msm/adreno_a6xx_rgmu.c b/drivers/gpu/msm/adreno_a6xx_rgmu.c
index a673d29..7a101c1 100644
--- a/drivers/gpu/msm/adreno_a6xx_rgmu.c
+++ b/drivers/gpu/msm/adreno_a6xx_rgmu.c
@@ -259,7 +259,7 @@ static int a6xx_rgmu_wait_for_lowest_idle(struct kgsl_device *device)
 	if (rgmu->idle_level != GPU_HW_IFPC)
 		return 0;
 
-	ts1 = read_AO_counter(device);
+	ts1 = a6xx_gmu_read_ao_counter(device);
 
 	t = jiffies + msecs_to_jiffies(RGMU_IDLE_TIMEOUT);
 	do {
@@ -273,7 +273,7 @@ static int a6xx_rgmu_wait_for_lowest_idle(struct kgsl_device *device)
 		usleep_range(10, 100);
 	} while (!time_after(jiffies, t));
 
-	ts2 = read_AO_counter(device);
+	ts2 = a6xx_gmu_read_ao_counter(device);
 
 	/* Do one last read incase it succeeds */
 	gmu_core_regread(device,
@@ -282,7 +282,7 @@ static int a6xx_rgmu_wait_for_lowest_idle(struct kgsl_device *device)
 	if (reg[0] & GX_GDSC_POWER_OFF)
 		return 0;
 
-	ts3 = read_AO_counter(device);
+	ts3 = a6xx_gmu_read_ao_counter(device);
 
 	/* Collect abort data to help with debugging */
 	gmu_core_regread(device, A6XX_RGMU_CX_PCC_DEBUG, &reg[1]);
@@ -573,6 +573,7 @@ struct gmu_dev_ops adreno_a6xx_rgmudev = {
 	.ifpc_show = a6xx_rgmu_ifpc_show,
 	.snapshot = a6xx_rgmu_snapshot,
 	.halt_execution = a6xx_rgmu_halt_execution,
+	.read_ao_counter = a6xx_gmu_read_ao_counter,
 	.gmu2host_intr_mask = RGMU_OOB_IRQ_MASK,
 	.gmu_ao_intr_mask = RGMU_AO_IRQ_MASK,
 };
diff --git a/drivers/gpu/msm/adreno_a6xx_snapshot.c b/drivers/gpu/msm/adreno_a6xx_snapshot.c
index ca603d4..715750b 100644
--- a/drivers/gpu/msm/adreno_a6xx_snapshot.c
+++ b/drivers/gpu/msm/adreno_a6xx_snapshot.c
@@ -1603,6 +1603,8 @@ static void _a6xx_do_crashdump(struct kgsl_device *device)
 	if (a6xx_capturescript.gpuaddr == 0 ||
 		a6xx_crashdump_registers.gpuaddr == 0)
 		return;
+	if (!test_bit(KGSL_MMU_STARTED, &device->mmu.flags))
+		return;
 
 	/* IF the SMMU is stalled we cannot do a crash dump */
 	kgsl_regread(device, A6XX_RBBM_STATUS3, &val);
diff --git a/drivers/gpu/msm/kgsl_gmu_core.c b/drivers/gpu/msm/kgsl_gmu_core.c
index 4e3bbfb..7e38399 100644
--- a/drivers/gpu/msm/kgsl_gmu_core.c
+++ b/drivers/gpu/msm/kgsl_gmu_core.c
@@ -380,3 +380,13 @@ int gmu_core_dev_wait_for_active_transition(struct kgsl_device *device)
 
 	return 0;
 }
+
+u64 gmu_core_dev_read_ao_counter(struct kgsl_device *device)
+{
+	struct gmu_dev_ops *ops = GMU_DEVICE_OPS(device);
+
+	if (ops && ops->read_ao_counter)
+		return ops->read_ao_counter(device);
+
+	return 0;
+}
diff --git a/drivers/gpu/msm/kgsl_gmu_core.h b/drivers/gpu/msm/kgsl_gmu_core.h
index 0818e71..24cb1d8 100644
--- a/drivers/gpu/msm/kgsl_gmu_core.h
+++ b/drivers/gpu/msm/kgsl_gmu_core.h
@@ -146,6 +146,7 @@ struct gmu_dev_ops {
 	void (*cooperative_reset)(struct kgsl_device *device);
 	void (*halt_execution)(struct kgsl_device *device);
 	int (*wait_for_active_transition)(struct kgsl_device *device);
+	u64 (*read_ao_counter)(struct kgsl_device *device);
 	const unsigned int gmu2host_intr_mask;
 	const unsigned int gmu_ao_intr_mask;
 };
@@ -228,5 +229,6 @@ int gmu_core_dev_ifpc_store(struct kgsl_device *device, unsigned int val);
 void gmu_core_dev_prepare_stop(struct kgsl_device *device);
 int gmu_core_dev_wait_for_active_transition(struct kgsl_device *device);
 void gmu_core_dev_cooperative_reset(struct kgsl_device *device);
+u64 gmu_core_dev_read_ao_counter(struct kgsl_device *device);
 
 #endif /* __KGSL_GMU_CORE_H */
diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c
index ac85b3d..977884d 100644
--- a/drivers/gpu/msm/kgsl_iommu.c
+++ b/drivers/gpu/msm/kgsl_iommu.c
@@ -1287,6 +1287,8 @@ static int _init_secure_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt)
 	_enable_gpuhtw_llc(mmu, iommu_pt);
 
 	ret = _attach_pt(iommu_pt, ctx);
+	if (ret)
+		goto done;
 
 	if (MMU_FEATURE(mmu, KGSL_MMU_HYP_SECURE_ALLOC))
 		iommu_set_fault_handler(iommu_pt->domain,
@@ -1295,8 +1297,8 @@ static int _init_secure_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt)
 	ret = iommu_domain_get_attr(iommu_pt->domain,
 				DOMAIN_ATTR_CONTEXT_BANK, &cb_num);
 	if (ret) {
-		dev_err(device->dev, "get DOMAIN_ATTR_PROCID failed: %d\n",
-				ret);
+		dev_err(device->dev, "get DOMAIN_ATTR_CONTEXT_BANK failed: %d\n",
+			ret);
 		goto done;
 	}
 
@@ -1675,7 +1677,9 @@ static int kgsl_iommu_start(struct kgsl_mmu *mmu)
 	}
 
 	/* Make sure the hardware is programmed to the default pagetable */
-	return kgsl_iommu_set_pt(mmu, mmu->defaultpagetable);
+	kgsl_iommu_set_pt(mmu, mmu->defaultpagetable);
+	set_bit(KGSL_MMU_STARTED, &mmu->flags);
+	return 0;
 }
 
 static int
@@ -2052,6 +2056,8 @@ static void kgsl_iommu_stop(struct kgsl_mmu *mmu)
 		for (i = 0; i < KGSL_IOMMU_CONTEXT_MAX; i++)
 			_detach_context(&iommu->ctx[i]);
 	}
+
+	clear_bit(KGSL_MMU_STARTED, &mmu->flags);
 }
 
 static u64
diff --git a/drivers/gpu/msm/kgsl_mmu.c b/drivers/gpu/msm/kgsl_mmu.c
index df9fe39..60d012ce 100644
--- a/drivers/gpu/msm/kgsl_mmu.c
+++ b/drivers/gpu/msm/kgsl_mmu.c
@@ -695,6 +695,7 @@ static struct kgsl_pagetable *nommu_getpagetable(struct kgsl_mmu *mmu,
 static int nommu_init(struct kgsl_mmu *mmu)
 {
 	mmu->features |= KGSL_MMU_GLOBAL_PAGETABLE;
+	set_bit(KGSL_MMU_STARTED, &mmu->flags);
 	return 0;
 }
 
diff --git a/drivers/gpu/msm/kgsl_mmu.h b/drivers/gpu/msm/kgsl_mmu.h
index 93012aa..7cc3321 100644
--- a/drivers/gpu/msm/kgsl_mmu.h
+++ b/drivers/gpu/msm/kgsl_mmu.h
@@ -164,6 +164,9 @@ struct kgsl_mmu {
 	} priv;
 };
 
+/* KGSL MMU FLAGS */
+#define KGSL_MMU_STARTED BIT(0)
+
 #define KGSL_IOMMU_PRIV(_device) (&((_device)->mmu.priv.iommu))
 
 extern struct kgsl_mmu_ops kgsl_iommu_ops;
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etf.c b/drivers/hwtracing/coresight/coresight-tmc-etf.c
index 72cbab2..a20de48 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etf.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etf.c
@@ -573,9 +573,12 @@ int tmc_read_prepare_etb(struct tmc_drvdata *drvdata)
 	}
 
 	/* Disable the TMC if need be */
-	if (drvdata->mode == CS_MODE_SYSFS)
+	if (drvdata->mode == CS_MODE_SYSFS) {
+		spin_unlock_irqrestore(&drvdata->spinlock, flags);
+		coresight_disable_all_source_link();
+		spin_lock_irqsave(&drvdata->spinlock, flags);
 		tmc_etb_disable_hw(drvdata);
-
+	}
 	drvdata->reading = true;
 out:
 	spin_unlock_irqrestore(&drvdata->spinlock, flags);
@@ -605,6 +608,7 @@ int tmc_read_unprepare_etb(struct tmc_drvdata *drvdata)
 		}
 	}
 
+	drvdata->reading = false;
 	/* Re-enable the TMC if need be */
 	if (drvdata->mode == CS_MODE_SYSFS) {
 		/*
@@ -617,6 +621,9 @@ int tmc_read_unprepare_etb(struct tmc_drvdata *drvdata)
 		 */
 		memset(drvdata->buf, 0, drvdata->size);
 		tmc_etb_enable_hw(drvdata);
+		spin_unlock_irqrestore(&drvdata->spinlock, flags);
+		coresight_enable_all_source_link();
+		spin_lock_irqsave(&drvdata->spinlock, flags);
 	} else {
 		/*
 		 * The ETB/ETF is not tracing and the buffer was just read.
@@ -626,7 +633,6 @@ int tmc_read_unprepare_etb(struct tmc_drvdata *drvdata)
 		drvdata->buf = NULL;
 	}
 
-	drvdata->reading = false;
 	spin_unlock_irqrestore(&drvdata->spinlock, flags);
 
 	/*
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c
index 81cecd5..f74f00e 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etr.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c
@@ -1617,9 +1617,13 @@ int tmc_read_prepare_etr(struct tmc_drvdata *drvdata)
 	}
 
 	/* Disable the TMC if need be */
-	if (drvdata->mode == CS_MODE_SYSFS)
-		tmc_etr_disable_hw(drvdata, true);
+	if (drvdata->mode == CS_MODE_SYSFS) {
+		spin_unlock_irqrestore(&drvdata->spinlock, flags);
+		coresight_disable_all_source_link();
+		spin_lock_irqsave(&drvdata->spinlock, flags);
 
+		tmc_etr_disable_hw(drvdata, true);
+	}
 	drvdata->reading = true;
 out:
 	spin_unlock_irqrestore(&drvdata->spinlock, flags);
@@ -1639,6 +1643,7 @@ int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata)
 	mutex_lock(&drvdata->mem_lock);
 	spin_lock_irqsave(&drvdata->spinlock, flags);
 
+	drvdata->reading = false;
 	/* RE-enable the TMC if need be */
 	if (drvdata->mode == CS_MODE_SYSFS) {
 		/*
@@ -1647,6 +1652,10 @@ int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata)
 		 * be NULL.
 		 */
 		tmc_etr_enable_hw(drvdata);
+
+		spin_unlock_irqrestore(&drvdata->spinlock, flags);
+		coresight_enable_all_source_link();
+		spin_lock_irqsave(&drvdata->spinlock, flags);
 	} else {
 		/*
 		 * The ETR is not tracing and the buffer was just read.
@@ -1656,14 +1665,12 @@ int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata)
 		drvdata->etr_buf = NULL;
 	}
 
-	drvdata->reading = false;
 	spin_unlock_irqrestore(&drvdata->spinlock, flags);
 
 	/* Free allocated memory out side of the spinlock */
 	if (etr_buf)
 		tmc_free_etr_buf(etr_buf);
 
-
 	mutex_unlock(&drvdata->mem_lock);
 	return 0;
 }
diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c
index 174ef74..f99efc1 100644
--- a/drivers/hwtracing/coresight/coresight.c
+++ b/drivers/hwtracing/coresight/coresight.c
@@ -62,6 +62,8 @@ static LIST_HEAD(cs_disabled_link);
 
 static LIST_HEAD(cs_active_paths);
 
+static struct coresight_device *activated_sink;
+
 /*
  * When losing synchronisation a new barrier packet needs to be inserted at the
  * beginning of the data collected in a buffer.  That way the decoder knows that
@@ -850,6 +852,129 @@ int coresight_store_path(struct coresight_device *csdev, struct list_head *path)
 	return 0;
 }
 
+static void coresight_enable_source_link(struct list_head *path)
+{
+	u32 type;
+	int ret;
+	struct coresight_node *nd;
+	struct coresight_device *csdev, *parent, *child;
+
+	list_for_each_entry_reverse(nd, path, link) {
+		csdev = nd->csdev;
+		type = csdev->type;
+
+		if (type == CORESIGHT_DEV_TYPE_LINKSINK)
+			type = (csdev == coresight_get_sink(path)) ?
+						CORESIGHT_DEV_TYPE_SINK :
+						CORESIGHT_DEV_TYPE_LINK;
+
+		switch (type) {
+		case CORESIGHT_DEV_TYPE_SINK:
+			break;
+		case CORESIGHT_DEV_TYPE_SOURCE:
+			if (source_ops(csdev)->enable) {
+				ret = coresight_enable_reg_clk(csdev);
+				if (ret)
+					goto err;
+
+				ret = source_ops(csdev)->enable(csdev,
+					NULL, CS_MODE_SYSFS);
+				if (ret) {
+					coresight_disable_reg_clk(csdev);
+					goto err;
+				}
+			}
+			csdev->enable = true;
+			break;
+		case CORESIGHT_DEV_TYPE_LINK:
+			parent = list_prev_entry(nd, link)->csdev;
+			child = list_next_entry(nd, link)->csdev;
+			ret = coresight_enable_link(csdev, parent, child, path);
+			if (ret)
+				goto err;
+			break;
+		default:
+			break;
+		}
+	}
+
+	return;
+err:
+	coresight_disable_previous_devs(path, nd);
+	coresight_release_path(csdev, path);
+}
+
+static void coresight_disable_source_link(struct list_head *path)
+{
+	u32 type;
+	struct coresight_node *nd;
+	struct coresight_device *csdev, *parent, *child;
+
+	list_for_each_entry(nd, path, link) {
+		csdev = nd->csdev;
+		type = csdev->type;
+
+		if (type == CORESIGHT_DEV_TYPE_LINKSINK)
+			type = (csdev == coresight_get_sink(path)) ?
+						CORESIGHT_DEV_TYPE_SINK :
+						CORESIGHT_DEV_TYPE_LINK;
+
+		switch (type) {
+		case CORESIGHT_DEV_TYPE_SINK:
+			break;
+		case CORESIGHT_DEV_TYPE_SOURCE:
+			if (source_ops(csdev)->disable) {
+				source_ops(csdev)->disable(csdev, NULL);
+				coresight_disable_reg_clk(csdev);
+			}
+			csdev->enable = false;
+			break;
+		case CORESIGHT_DEV_TYPE_LINK:
+			parent = list_prev_entry(nd, link)->csdev;
+			child = list_next_entry(nd, link)->csdev;
+			coresight_disable_link(csdev, parent, child, path);
+			break;
+		default:
+			break;
+		}
+	}
+}
+void coresight_disable_all_source_link(void)
+{
+	struct coresight_path *cspath = NULL;
+	struct coresight_path *cspath_next = NULL;
+
+	mutex_lock(&coresight_mutex);
+
+	list_for_each_entry_safe(cspath, cspath_next, &cs_active_paths, link) {
+		coresight_disable_source_link(cspath->path);
+	}
+
+	activated_sink = coresight_get_enabled_sink(false);
+	if (activated_sink)
+		activated_sink->activated = false;
+
+	mutex_unlock(&coresight_mutex);
+}
+
+void coresight_enable_all_source_link(void)
+{
+	struct coresight_path *cspath = NULL;
+	struct coresight_path *cspath_next = NULL;
+
+	mutex_lock(&coresight_mutex);
+
+	list_for_each_entry_safe(cspath, cspath_next, &cs_active_paths, link) {
+		coresight_enable_source_link(cspath->path);
+	}
+
+	if (activated_sink && activated_sink->enable)
+		activated_sink->activated = true;
+
+	activated_sink = NULL;
+	mutex_unlock(&coresight_mutex);
+}
+
 int coresight_enable(struct coresight_device *csdev)
 {
 	int ret = 0;
@@ -959,18 +1084,32 @@ static ssize_t enable_sink_store(struct device *dev,
 	int ret;
 	unsigned long val;
 	struct coresight_device *csdev = to_coresight_device(dev);
+	struct coresight_device *sink = NULL;
 
 	ret = kstrtoul(buf, 10, &val);
 	if (ret)
 		return ret;
+	mutex_lock(&coresight_mutex);
 
-	if (val)
+	if (val) {
+		sink = activated_sink ? activated_sink :
+			coresight_get_enabled_sink(false);
+		if (sink && strcmp(dev_name(&sink->dev),
+				dev_name(&csdev->dev)))
+			goto err;
 		csdev->activated = true;
-	else
+	} else {
+		if (csdev->enable)
+			goto err;
 		csdev->activated = false;
+	}
+	mutex_unlock(&coresight_mutex);
 
 	return size;
 
+err:
+	mutex_unlock(&coresight_mutex);
+	return -EINVAL;
 }
 static DEVICE_ATTR_RW(enable_sink);
 
diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
index 3cd43f5..c9ef2cc 100644
--- a/drivers/i2c/busses/i2c-qcom-geni.c
+++ b/drivers/i2c/busses/i2c-qcom-geni.c
@@ -98,6 +98,8 @@ struct geni_i2c_dev {
 	int clk_fld_idx;
 	struct dma_chan *tx_c;
 	struct dma_chan *rx_c;
+	struct msm_gpi_tre lock_t;
+	struct msm_gpi_tre unlock_t;
 	struct msm_gpi_tre cfg0_t;
 	struct msm_gpi_tre go_t;
 	struct msm_gpi_tre tx_t;
@@ -370,9 +372,9 @@ static void gi2c_ev_cb(struct dma_chan *ch, struct msm_gpi_cb const *cb_str,
 	}
 	if (cb_str->cb_event != MSM_GPI_QUP_NOTIFY)
 		GENI_SE_ERR(gi2c->ipcl, true, gi2c->dev,
-				"GSI QN err:0x%x, status:0x%x, err:%d\n",
-				cb_str->error_log.error_code,
-				m_stat, cb_str->cb_event);
+			"GSI QN err:0x%x, status:0x%x, err:%d slv_addr: 0x%x R/W: %d\n",
+			cb_str->error_log.error_code, m_stat,
+			cb_str->cb_event, gi2c->cur->addr, gi2c->cur->flags);
 }
 
 static void gi2c_gsi_cb_err(struct msm_gpi_dma_async_tx_cb_param *cb,
@@ -398,7 +400,9 @@ static void gi2c_gsi_tx_cb(void *ptr)
 	struct msm_gpi_dma_async_tx_cb_param *tx_cb = ptr;
 	struct geni_i2c_dev *gi2c = tx_cb->userdata;
 
-	if (!(gi2c->cur->flags & I2C_M_RD)) {
+	if (tx_cb->completion_code == MSM_GPI_TCE_EOB) {
+		complete(&gi2c->xfer);
+	} else if (!(gi2c->cur->flags & I2C_M_RD)) {
 		gi2c_gsi_cb_err(tx_cb, "TX");
 		complete(&gi2c->xfer);
 	}
@@ -460,6 +464,23 @@ static int geni_i2c_gsi_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
 		}
 	}
 
+	if (gi2c->is_shared) {
+		struct msm_gpi_tre *lock_t = &gi2c->lock_t;
+		struct msm_gpi_tre *unlock_t = &gi2c->unlock_t;
+
+		/* lock */
+		lock_t->dword[0] = MSM_GPI_LOCK_TRE_DWORD0;
+		lock_t->dword[1] = MSM_GPI_LOCK_TRE_DWORD1;
+		lock_t->dword[2] = MSM_GPI_LOCK_TRE_DWORD2;
+		lock_t->dword[3] = MSM_GPI_LOCK_TRE_DWORD3(0, 0, 0, 0, 1);
+
+		/* unlock */
+		unlock_t->dword[0] = MSM_GPI_UNLOCK_TRE_DWORD0;
+		unlock_t->dword[1] = MSM_GPI_UNLOCK_TRE_DWORD1;
+		unlock_t->dword[2] = MSM_GPI_UNLOCK_TRE_DWORD2;
+		unlock_t->dword[3] = MSM_GPI_UNLOCK_TRE_DWORD3(0, 0, 0, 1, 0);
+	}
+
 	if (!gi2c->cfg_sent) {
 		struct geni_i2c_clk_fld *itr = geni_i2c_clk_map +
 							gi2c->clk_fld_idx;
@@ -499,24 +520,34 @@ static int geni_i2c_gsi_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
 		}
 
 		qcom_geni_i2c_calc_timeout(gi2c);
-		if (!gi2c->cfg_sent) {
+
+		if (!gi2c->cfg_sent)
 			segs++;
+		if (gi2c->is_shared && (i == 0 || i == num-1)) {
+			segs++;
+			if (num == 1)
+				segs++;
 			sg_init_table(gi2c->tx_sg, segs);
-			sg_set_buf(gi2c->tx_sg, &gi2c->cfg0_t,
-						sizeof(gi2c->cfg0_t));
-			gi2c->cfg_sent = 1;
-			index++;
+			if (i == 0)
+				sg_set_buf(&gi2c->tx_sg[index++], &gi2c->lock_t,
+					sizeof(gi2c->lock_t));
 		} else {
 			sg_init_table(gi2c->tx_sg, segs);
 		}
 
+		if (!gi2c->cfg_sent) {
+			sg_set_buf(&gi2c->tx_sg[index++], &gi2c->cfg0_t,
+						sizeof(gi2c->cfg0_t));
+			gi2c->cfg_sent = 1;
+		}
+
 		go_t->dword[0] = MSM_GPI_I2C_GO_TRE_DWORD0((stretch << 2),
 							   msgs[i].addr, op);
 		go_t->dword[1] = MSM_GPI_I2C_GO_TRE_DWORD1;
 
 		if (msgs[i].flags & I2C_M_RD) {
 			go_t->dword[2] = MSM_GPI_I2C_GO_TRE_DWORD2(msgs[i].len);
-			go_t->dword[3] = MSM_GPI_I2C_GO_TRE_DWORD3(1, 0, 0, 1,
+			go_t->dword[3] = MSM_GPI_I2C_GO_TRE_DWORD3(1, 0, 0, 0,
 									0);
 		} else {
 			go_t->dword[2] = MSM_GPI_I2C_GO_TRE_DWORD2(0);
@@ -588,13 +619,22 @@ static int geni_i2c_gsi_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
 				MSM_GPI_DMA_W_BUFFER_TRE_DWORD1(gi2c->tx_ph);
 			gi2c->tx_t.dword[2] =
 				MSM_GPI_DMA_W_BUFFER_TRE_DWORD2(msgs[i].len);
-			gi2c->tx_t.dword[3] =
+			if (gi2c->is_shared && i == num-1)
+				gi2c->tx_t.dword[3] =
+				MSM_GPI_DMA_W_BUFFER_TRE_DWORD3(0, 0, 1, 0, 1);
+			else
+				gi2c->tx_t.dword[3] =
 				MSM_GPI_DMA_W_BUFFER_TRE_DWORD3(0, 0, 1, 0, 0);
 
 			sg_set_buf(&gi2c->tx_sg[index++], &gi2c->tx_t,
 							  sizeof(gi2c->tx_t));
 		}
 
+		if (gi2c->is_shared && i == num-1) {
+			sg_set_buf(&gi2c->tx_sg[index++],
+				&gi2c->unlock_t, sizeof(gi2c->unlock_t));
+		}
+
 		gi2c->tx_desc = dmaengine_prep_slave_sg(gi2c->tx_c, gi2c->tx_sg,
 						segs, DMA_MEM_TO_DEV,
 						(DMA_PREP_INTERRUPT |
@@ -616,8 +656,9 @@ static int geni_i2c_gsi_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
 						gi2c->xfer_timeout);
 		if (!timeout) {
 			GENI_SE_ERR(gi2c->ipcl, true, gi2c->dev,
-				    "GSI Txn timed out: %u len: %d\n",
-					gi2c->xfer_timeout, gi2c->cur->len);
+				    "GSI Txn timed out: %u len: %d slv:addr: 0x%x R/W: %d\n",
+					gi2c->xfer_timeout, gi2c->cur->len,
+					gi2c->cur->addr, gi2c->cur->flags);
 			geni_se_dump_dbg_regs(&gi2c->i2c_rsc, gi2c->base,
 						gi2c->ipcl);
 			gi2c->err = -ETIMEDOUT;
@@ -666,6 +707,9 @@ static int geni_i2c_xfer(struct i2c_adapter *adap,
 	if (gi2c->se_mode == GSI_ONLY) {
 		ret = geni_i2c_gsi_xfer(adap, msgs, num);
 		goto geni_i2c_txn_ret;
+	} else {
+		/* Don't set shared flag in non-GSI mode */
+		gi2c->is_shared = false;
 	}
 
 	qcom_geni_i2c_conf(gi2c, 0);
@@ -1001,7 +1045,7 @@ static int geni_i2c_runtime_resume(struct device *dev)
 	if (!gi2c->ipcl) {
 		char ipc_name[I2C_NAME_SIZE];
 
-		snprintf(ipc_name, I2C_NAME_SIZE, "i2c-%d", gi2c->adap.nr);
+		snprintf(ipc_name, I2C_NAME_SIZE, "%s", dev_name(gi2c->dev));
 		gi2c->ipcl = ipc_log_context_create(2, ipc_name, 0);
 	}
 
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 11f7366..85bb21b 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -1337,4 +1337,18 @@
 
 source "drivers/input/touchscreen/synaptics_dsx/Kconfig"
 
+config TOUCHSCREEN_SYNAPTICS_TCM
+	bool "Synaptics TCM Touchscreen Driver"
+	depends on I2C
+	default y
+	help
+	  Say Y here if you have a Synaptics Touchscreen.
+
+	  If unsure, say N.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called synaptics_tcm.
+
+source "drivers/input/touchscreen/synaptics_tcm/Kconfig"
+
 endif
diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
index a120f92..8a2d92a 100644
--- a/drivers/input/touchscreen/Makefile
+++ b/drivers/input/touchscreen/Makefile
@@ -111,3 +111,4 @@
 obj-$(CONFIG_TOUCHSCREEN_ROHM_BU21023)	+= rohm_bu21023.o
 obj-$(CONFIG_TOUCHSCREEN_ST)		+= st/
 obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_DSX) += synaptics_dsx/
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_TCM)	+= synaptics_tcm/
diff --git a/drivers/input/touchscreen/synaptics_tcm/Kconfig b/drivers/input/touchscreen/synaptics_tcm/Kconfig
new file mode 100644
index 0000000..78c687d
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_tcm/Kconfig
@@ -0,0 +1,135 @@
+#
+# Synaptics TCM touchscreen driver configuration
+#
+menuconfig TOUCHSCREEN_SYNAPTICS_TCM
+	bool "Synaptics TCM touchscreen"
+	default y
+	help
+	  Say Y here if you have a Synaptics TCM touchscreen connected
+	  to your system.
+
+	  If unsure, say N.
+
+if TOUCHSCREEN_SYNAPTICS_TCM
+
+choice
+	default TOUCHSCREEN_SYNAPTICS_TCM_I2C
+	prompt "Synaptics TCM bus module"
+config TOUCHSCREEN_SYNAPTICS_TCM_I2C
+	bool "I2C"
+	depends on I2C
+	help
+	  Say Y here to use I2C bus for communication.
+
+	  Else, say N.
+
+	  This will configure I2C bus for communicating
+	  with touch controller.
+
+config TOUCHSCREEN_SYNAPTICS_TCM_SPI
+	bool "SPI"
+	depends on SPI_MASTER
+	help
+	  Say Y here to use SPI bus for communication.
+
+	  Else, say N.
+
+	  This will configure SPI bus for communicating
+	  with touch controller.
+
+endchoice
+
+config TOUCHSCREEN_SYNAPTICS_TCM_CORE
+	tristate "Synaptics TCM core module"
+	depends on I2C || SPI_MASTER
+	default y
+	help
+	  Say Y here to enable core functionality.
+
+	  If unsure, say N.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called synaptics_tcm_core.
+
+config TOUCHSCREEN_SYNAPTICS_TCM_TOUCH
+	tristate "Synaptics TCM touch module"
+	depends on TOUCHSCREEN_SYNAPTICS_TCM_CORE
+	default y
+	help
+	  Say Y here to enable support for touch reporting.
+
+	  If unsure, say N.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called synaptics_tcm_touch.
+
+config TOUCHSCREEN_SYNAPTICS_TCM_DEVICE
+	tristate "Synaptics TCM device module"
+	depends on TOUCHSCREEN_SYNAPTICS_TCM_CORE
+	default y
+	help
+	  Say Y here to enable support for TCM device functionality.
+
+	  If unsure, say N.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called synaptics_tcm_device.
+
+config TOUCHSCREEN_SYNAPTICS_TCM_TESTING
+	tristate "Synaptics TCM testing module"
+	depends on TOUCHSCREEN_SYNAPTICS_TCM_CORE
+	help
+	  Say Y here to enable support for testing functionality.
+
+	  If unsure, say N.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called synaptics_tcm_testing.
+
+config TOUCHSCREEN_SYNAPTICS_TCM_REFLASH
+	tristate "Synaptics TCM reflash module"
+	depends on TOUCHSCREEN_SYNAPTICS_TCM_CORE
+	default y
+	help
+	  Say Y here to enable support for reflash functionality.
+
+	  If unsure, say N.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called synaptics_tcm_reflash.
+
+config TOUCHSCREEN_SYNAPTICS_TCM_RECOVERY
+	tristate "Synaptics TCM recovery module"
+	depends on TOUCHSCREEN_SYNAPTICS_TCM_CORE
+	default y
+	help
+	  Say Y here to enable support for recovery functionality.
+
+	  If unsure, say N.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called synaptics_tcm_recovery.
+
+config TOUCHSCREEN_SYNAPTICS_TCM_ZEROFLASH
+	tristate "Synaptics TCM zeroflash module"
+	depends on TOUCHSCREEN_SYNAPTICS_TCM_CORE
+	help
+	  Say Y here to enable support for ZeroFlash functionality.
+
+	  If unsure, say N.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called synaptics_tcm_zeroflash.
+
+config TOUCHSCREEN_SYNAPTICS_TCM_DIAGNOSTICS
+	tristate "Synaptics TCM diagnostics module"
+	depends on TOUCHSCREEN_SYNAPTICS_TCM_CORE
+	default y
+	help
+	  Say Y here to enable support for diagnostics functionality.
+
+	  If unsure, say N.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called synaptics_tcm_diagnostics.
+endif
diff --git a/drivers/input/touchscreen/synaptics_tcm/Makefile b/drivers/input/touchscreen/synaptics_tcm/Makefile
new file mode 100644
index 0000000..30db3b5
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_tcm/Makefile
@@ -0,0 +1,16 @@
+#
+# Makefile for the Synaptics TCM touchscreen driver.
+#
+
+# Each configuration option enables a list of files.
+
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_TCM_SPI) += synaptics_tcm_spi.o
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_TCM_I2C) += synaptics_tcm_i2c.o
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_TCM_CORE) += synaptics_tcm_core.o
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_TCM_TOUCH) += synaptics_tcm_touch.o
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_TCM_DEVICE) += synaptics_tcm_device.o
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_TCM_TESTING) += synaptics_tcm_testing.o
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_TCM_REFLASH) += synaptics_tcm_reflash.o
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_TCM_RECOVERY) += synaptics_tcm_recovery.o
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_TCM_ZEROFLASH) += synaptics_tcm_zeroflash.o
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_TCM_DIAGNOSTICS) += synaptics_tcm_diagnostics.o
diff --git a/drivers/input/touchscreen/synaptics_tcm/synaptics_tcm_core.c b/drivers/input/touchscreen/synaptics_tcm/synaptics_tcm_core.c
new file mode 100644
index 0000000..9c965a2
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_tcm/synaptics_tcm_core.c
@@ -0,0 +1,3731 @@
+/*
+ * Synaptics TCM touchscreen driver
+ *
+ * Copyright (C) 2017-2019 Synaptics Incorporated. All rights reserved.
+ *
+ * Copyright (C) 2017-2019 Scott Lin <scott.lin@tw.synaptics.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * INFORMATION CONTAINED IN THIS DOCUMENT IS PROVIDED "AS-IS," AND SYNAPTICS
+ * EXPRESSLY DISCLAIMS ALL EXPRESS AND IMPLIED WARRANTIES, INCLUDING ANY
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE,
+ * AND ANY WARRANTIES OF NON-INFRINGEMENT OF ANY INTELLECTUAL PROPERTY RIGHTS.
+ * IN NO EVENT SHALL SYNAPTICS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, PUNITIVE, OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OF THE INFORMATION CONTAINED IN THIS DOCUMENT, HOWEVER CAUSED
+ * AND BASED ON ANY THEORY OF LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHER TORTIOUS ACTION, AND EVEN IF SYNAPTICS WAS ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE. IF A TRIBUNAL OF COMPETENT JURISDICTION DOES
+ * NOT PERMIT THE DISCLAIMER OF DIRECT DAMAGES OR ANY OTHER DAMAGES, SYNAPTICS'
+ * TOTAL CUMULATIVE LIABILITY TO ANY PARTY SHALL NOT EXCEED ONE HUNDRED U.S.
+ * DOLLARS.
+ */
+
+#include <linux/gpio.h>
+#include <linux/kthread.h>
+#include <linux/interrupt.h>
+#include <linux/regulator/consumer.h>
+#include "synaptics_tcm_core.h"
+
+/* #define RESET_ON_RESUME */
+
+/* #define RESUME_EARLY_UNBLANK */
+
+#define RESET_ON_RESUME_DELAY_MS 50
+
+#define PREDICTIVE_READING
+
+#define MIN_READ_LENGTH 9
+
+#define KEEP_DRIVER_ON_ERROR
+
+/* #define FORCE_RUN_APPLICATION_FIRMWARE */
+
+#define NOTIFIER_PRIORITY 2
+
+#define RESPONSE_TIMEOUT_MS 3000
+
+#define APP_STATUS_POLL_TIMEOUT_MS 1000
+
+#define APP_STATUS_POLL_MS 100
+
+#define ENABLE_IRQ_DELAY_MS 20
+
+#define FALL_BACK_ON_POLLING
+
+#define POLLING_DELAY_MS 5
+
+#define RUN_WATCHDOG true
+
+#define WATCHDOG_TRIGGER_COUNT 2
+
+#define WATCHDOG_DELAY_MS 50000
+
+#define MODE_SWITCH_DELAY_MS 100
+
+#define READ_RETRY_US_MIN 5000
+
+#define READ_RETRY_US_MAX 10000
+
+#define WRITE_DELAY_US_MIN 500
+
+#define WRITE_DELAY_US_MAX 1000
+
+#define HOST_DOWNLOAD_WAIT_MS 100
+
+#define HOST_DOWNLOAD_TIMEOUT_MS 1000
+
+#define DYNAMIC_CONFIG_SYSFS_DIR_NAME "dynamic_config"
+
+/*
+ * dynamic_config_sysfs() - expand into a matched pair of sysfs show/store
+ * handlers for one dynamic-config parameter.
+ *
+ * 'c_name' is pasted into the handler names (syna_tcm_sysfs_<c_name>_show /
+ * _store); 'id' selects the parameter passed to the core module's
+ * get_dynamic_config()/set_dynamic_config() callbacks.  Both handlers locate
+ * the core handle via the sysfs directory's parent device and serialize all
+ * access with extif_mutex.  The store handler accepts a base-10 unsigned
+ * integer and returns 'count' on success or a negative error code.
+ */
+#define dynamic_config_sysfs(c_name, id) \
+static ssize_t syna_tcm_sysfs_##c_name##_show(struct device *dev, \
+		struct device_attribute *attr, char *buf) \
+{ \
+	int retval; \
+	unsigned short value; \
+	struct device *p_dev; \
+	struct kobject *p_kobj; \
+	struct syna_tcm_hcd *tcm_hcd; \
+\
+	p_kobj = sysfs_dir->parent; \
+	p_dev = container_of(p_kobj, struct device, kobj); \
+	tcm_hcd = dev_get_drvdata(p_dev); \
+\
+	mutex_lock(&tcm_hcd->extif_mutex); \
+\
+	retval = tcm_hcd->get_dynamic_config(tcm_hcd, id, &value); \
+	if (retval < 0) { \
+		LOGE(tcm_hcd->pdev->dev.parent, \
+				"Failed to get dynamic config\n"); \
+		goto exit; \
+	} \
+\
+	retval = snprintf(buf, PAGE_SIZE, "%u\n", value); \
+\
+exit: \
+	mutex_unlock(&tcm_hcd->extif_mutex); \
+\
+	return retval; \
+} \
+\
+static ssize_t syna_tcm_sysfs_##c_name##_store(struct device *dev, \
+		struct device_attribute *attr, const char *buf, size_t count) \
+{ \
+	int retval; \
+	unsigned int input; \
+	struct device *p_dev; \
+	struct kobject *p_kobj; \
+	struct syna_tcm_hcd *tcm_hcd; \
+\
+	p_kobj = sysfs_dir->parent; \
+	p_dev = container_of(p_kobj, struct device, kobj); \
+	tcm_hcd = dev_get_drvdata(p_dev); \
+\
+	if (kstrtouint(buf, 10, &input)) \
+		return -EINVAL; \
+\
+	mutex_lock(&tcm_hcd->extif_mutex); \
+\
+	retval = tcm_hcd->set_dynamic_config(tcm_hcd, id, input); \
+	if (retval < 0) { \
+		LOGE(tcm_hcd->pdev->dev.parent, \
+				"Failed to set dynamic config\n"); \
+		goto exit; \
+	} \
+\
+	retval = count; \
+\
+exit: \
+	mutex_unlock(&tcm_hcd->extif_mutex); \
+\
+	return retval; \
+}
+
+/* Signaled when a response (or an error) for the outstanding command lands */
+DECLARE_COMPLETION(response_complete);
+
+/* sysfs directory created under the platform device's kobject */
+static struct kobject *sysfs_dir;
+
+/* Pool of registered feature modules (touch, reflash, diagnostics, ...) */
+static struct syna_tcm_module_pool mod_pool;
+
+SHOW_PROTOTYPE(syna_tcm, info);
+STORE_PROTOTYPE(syna_tcm, irq_en);
+STORE_PROTOTYPE(syna_tcm, reset);
+STORE_PROTOTYPE(syna_tcm, watchdog);
+SHOW_STORE_PROTOTYPE(syna_tcm, no_doze);
+SHOW_STORE_PROTOTYPE(syna_tcm, disable_noise_mitigation);
+SHOW_STORE_PROTOTYPE(syna_tcm, inhibit_frequency_shift);
+SHOW_STORE_PROTOTYPE(syna_tcm, requested_frequency);
+SHOW_STORE_PROTOTYPE(syna_tcm, disable_hsync);
+SHOW_STORE_PROTOTYPE(syna_tcm, rezero_on_exit_deep_sleep);
+SHOW_STORE_PROTOTYPE(syna_tcm, charger_connected);
+SHOW_STORE_PROTOTYPE(syna_tcm, no_baseline_relaxation);
+SHOW_STORE_PROTOTYPE(syna_tcm, in_wakeup_gesture_mode);
+SHOW_STORE_PROTOTYPE(syna_tcm, stimulus_fingers);
+SHOW_STORE_PROTOTYPE(syna_tcm, grip_suppression_enabled);
+SHOW_STORE_PROTOTYPE(syna_tcm, enable_thick_glove);
+SHOW_STORE_PROTOTYPE(syna_tcm, enable_glove);
+
+/* Attributes created directly in the driver's sysfs directory */
+static struct device_attribute *attrs[] = {
+	ATTRIFY(info),
+	ATTRIFY(irq_en),
+	ATTRIFY(reset),
+	ATTRIFY(watchdog),
+};
+
+/* Attributes created in the "dynamic_config" sysfs subdirectory */
+static struct device_attribute *dynamic_config_attrs[] = {
+	ATTRIFY(no_doze),
+	ATTRIFY(disable_noise_mitigation),
+	ATTRIFY(inhibit_frequency_shift),
+	ATTRIFY(requested_frequency),
+	ATTRIFY(disable_hsync),
+	ATTRIFY(rezero_on_exit_deep_sleep),
+	ATTRIFY(charger_connected),
+	ATTRIFY(no_baseline_relaxation),
+	ATTRIFY(in_wakeup_gesture_mode),
+	ATTRIFY(stimulus_fingers),
+	ATTRIFY(grip_suppression_enabled),
+	ATTRIFY(enable_thick_glove),
+	ATTRIFY(enable_glove),
+};
+
+/*
+ * syna_tcm_sysfs_info_show() - "info" sysfs attribute read handler.
+ *
+ * Re-runs device identification, then formats the TouchComm protocol
+ * version, driver version, current firmware mode, part number and packrat
+ * (firmware build) number into @buf.  Returns the number of bytes written
+ * or a negative error code.  Serialized by extif_mutex.
+ */
+static ssize_t syna_tcm_sysfs_info_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int retval;
+	unsigned int count;
+	struct device *p_dev;
+	struct kobject *p_kobj;
+	struct syna_tcm_hcd *tcm_hcd;
+
+	/* recover the core handle from the sysfs directory's parent device */
+	p_kobj = sysfs_dir->parent;
+	p_dev = container_of(p_kobj, struct device, kobj);
+	tcm_hcd = dev_get_drvdata(p_dev);
+
+	mutex_lock(&tcm_hcd->extif_mutex);
+
+	/* refresh id_info / packrat_number before reporting them */
+	retval = tcm_hcd->identify(tcm_hcd, true);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to do identification\n");
+		goto exit;
+	}
+
+	count = 0;
+
+	retval = snprintf(buf, PAGE_SIZE - count,
+			"TouchComm version:  %d\n",
+			tcm_hcd->id_info.version);
+	if (retval < 0)
+		goto exit;
+
+	buf += retval;
+	count += retval;
+
+	/* omit the subversion component when it is zero */
+	if (SYNAPTICS_TCM_ID_SUBVERSION == 0) {
+		retval = snprintf(buf, PAGE_SIZE - count,
+				"Driver version:     %d.%d\n",
+				(unsigned char)(SYNAPTICS_TCM_ID_VERSION >> 8),
+				(unsigned char)SYNAPTICS_TCM_ID_VERSION);
+	} else {
+		retval = snprintf(buf, PAGE_SIZE - count,
+				"Driver version:     %d.%d.%d\n",
+				(unsigned char)(SYNAPTICS_TCM_ID_VERSION >> 8),
+				(unsigned char)SYNAPTICS_TCM_ID_VERSION,
+				SYNAPTICS_TCM_ID_SUBVERSION);
+	}
+	if (retval < 0)
+		goto exit;
+
+	buf += retval;
+	count += retval;
+
+	switch (tcm_hcd->id_info.mode) {
+	case MODE_APPLICATION:
+		retval = snprintf(buf, PAGE_SIZE - count,
+				"Firmware mode:      Application\n");
+		if (retval < 0)
+			goto exit;
+		break;
+	case MODE_HOST_DOWNLOAD:
+		retval = snprintf(buf, PAGE_SIZE - count,
+				"Firmware mode:      Host Download\n");
+		if (retval < 0)
+			goto exit;
+		break;
+	case MODE_BOOTLOADER:
+		retval = snprintf(buf, PAGE_SIZE - count,
+				"Firmware mode:      Bootloader\n");
+		if (retval < 0)
+			goto exit;
+		break;
+	case MODE_TDDI_BOOTLOADER:
+		retval = snprintf(buf, PAGE_SIZE - count,
+				"Firmware mode:      TDDI Bootloader\n");
+		if (retval < 0)
+			goto exit;
+		break;
+	default:
+		retval = snprintf(buf, PAGE_SIZE - count,
+				"Firmware mode:      Unknown (%d)\n",
+				tcm_hcd->id_info.mode);
+		if (retval < 0)
+			goto exit;
+		break;
+	}
+	buf += retval;
+	count += retval;
+
+	retval = snprintf(buf, PAGE_SIZE - count,
+			"Part number:        ");
+	if (retval < 0)
+		goto exit;
+
+	buf += retval;
+	count += retval;
+
+	/* part_number is a fixed-size byte array, not a NUL-terminated
+	 * string, so it is copied verbatim rather than printed with %s
+	 */
+	retval = secure_memcpy(buf,
+			PAGE_SIZE - count,
+			tcm_hcd->id_info.part_number,
+			sizeof(tcm_hcd->id_info.part_number),
+			sizeof(tcm_hcd->id_info.part_number));
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to copy part number string\n");
+		goto exit;
+	}
+	buf += sizeof(tcm_hcd->id_info.part_number);
+	count += sizeof(tcm_hcd->id_info.part_number);
+
+	retval = snprintf(buf, PAGE_SIZE - count,
+			"\n");
+	if (retval < 0)
+		goto exit;
+
+	buf += retval;
+	count += retval;
+
+	retval = snprintf(buf, PAGE_SIZE - count,
+			"Packrat number:     %d\n",
+			tcm_hcd->packrat_number);
+	if (retval < 0)
+		goto exit;
+
+	count += retval;
+
+	retval = count;
+
+exit:
+	mutex_unlock(&tcm_hcd->extif_mutex);
+
+	return retval;
+}
+
+/*
+ * syna_tcm_sysfs_irq_en_store() - "irq_en" sysfs attribute write handler.
+ *
+ * Accepts "0" to disable or "1" to enable the attention interrupt via the
+ * core enable_irq() callback; any other value yields -EINVAL.  Returns
+ * @count on success or a negative error code.
+ */
+static ssize_t syna_tcm_sysfs_irq_en_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned int input;
+	struct device *p_dev;
+	struct kobject *p_kobj;
+	struct syna_tcm_hcd *tcm_hcd;
+
+	p_kobj = sysfs_dir->parent;
+	p_dev = container_of(p_kobj, struct device, kobj);
+	tcm_hcd = dev_get_drvdata(p_dev);
+
+	if (kstrtouint(buf, 10, &input))
+		return -EINVAL;
+
+	mutex_lock(&tcm_hcd->extif_mutex);
+
+	if (input == 0) {
+		retval = tcm_hcd->enable_irq(tcm_hcd, false, true);
+		if (retval < 0) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+					"Failed to disable interrupt\n");
+			goto exit;
+		}
+	} else if (input == 1) {
+		/* NOTE(review): third argument is NULL here but 'true' in the
+		 * disable path — presumably a bool parameter where NULL reads
+		 * as false; confirm against the enable_irq() prototype
+		 */
+		retval = tcm_hcd->enable_irq(tcm_hcd, true, NULL);
+		if (retval < 0) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+					"Failed to enable interrupt\n");
+			goto exit;
+		}
+	} else {
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	retval = count;
+
+exit:
+	mutex_unlock(&tcm_hcd->extif_mutex);
+
+	return retval;
+}
+
+/*
+ * syna_tcm_sysfs_reset_store() - "reset" sysfs attribute write handler.
+ *
+ * Accepts "1" for a software reset or "2" for a hardware reset; any other
+ * value yields -EINVAL.  Returns @count on success or a negative error code.
+ */
+static ssize_t syna_tcm_sysfs_reset_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	bool hw_reset;
+	unsigned int input;
+	struct device *p_dev;
+	struct kobject *p_kobj;
+	struct syna_tcm_hcd *tcm_hcd;
+
+	p_kobj = sysfs_dir->parent;
+	p_dev = container_of(p_kobj, struct device, kobj);
+	tcm_hcd = dev_get_drvdata(p_dev);
+
+	if (kstrtouint(buf, 10, &input))
+		return -EINVAL;
+
+	/* 1 = software reset, 2 = hardware reset */
+	if (input == 1)
+		hw_reset = false;
+	else if (input == 2)
+		hw_reset = true;
+	else
+		return -EINVAL;
+
+	mutex_lock(&tcm_hcd->extif_mutex);
+
+	retval = tcm_hcd->reset(tcm_hcd, hw_reset, true);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to do reset\n");
+		goto exit;
+	}
+
+	retval = count;
+
+exit:
+	mutex_unlock(&tcm_hcd->extif_mutex);
+
+	return retval;
+}
+
+/*
+ * syna_tcm_sysfs_watchdog_store() - "watchdog" sysfs attribute write handler.
+ *
+ * Accepts "0" to stop or "1" to run the device watchdog, recording the
+ * choice in watchdog.run and applying it via update_watchdog().  Returns
+ * @count on success or -EINVAL for any other input.
+ */
+static ssize_t syna_tcm_sysfs_watchdog_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned int input;
+	struct device *p_dev;
+	struct kobject *p_kobj;
+	struct syna_tcm_hcd *tcm_hcd;
+
+	p_kobj = sysfs_dir->parent;
+	p_dev = container_of(p_kobj, struct device, kobj);
+	tcm_hcd = dev_get_drvdata(p_dev);
+
+	if (kstrtouint(buf, 10, &input))
+		return -EINVAL;
+
+	if (input != 0 && input != 1)
+		return -EINVAL;
+
+	mutex_lock(&tcm_hcd->extif_mutex);
+
+	tcm_hcd->watchdog.run = input;
+	tcm_hcd->update_watchdog(tcm_hcd, input);
+
+	mutex_unlock(&tcm_hcd->extif_mutex);
+
+	return count;
+}
+
+/* Instantiate the show/store handler pair for each dynamic-config field */
+dynamic_config_sysfs(no_doze, DC_NO_DOZE)
+
+dynamic_config_sysfs(disable_noise_mitigation, DC_DISABLE_NOISE_MITIGATION)
+
+dynamic_config_sysfs(inhibit_frequency_shift, DC_INHIBIT_FREQUENCY_SHIFT)
+
+dynamic_config_sysfs(requested_frequency, DC_REQUESTED_FREQUENCY)
+
+dynamic_config_sysfs(disable_hsync, DC_DISABLE_HSYNC)
+
+dynamic_config_sysfs(rezero_on_exit_deep_sleep, DC_REZERO_ON_EXIT_DEEP_SLEEP)
+
+dynamic_config_sysfs(charger_connected, DC_CHARGER_CONNECTED)
+
+dynamic_config_sysfs(no_baseline_relaxation, DC_NO_BASELINE_RELAXATION)
+
+dynamic_config_sysfs(in_wakeup_gesture_mode, DC_IN_WAKEUP_GESTURE_MODE)
+
+dynamic_config_sysfs(stimulus_fingers, DC_STIMULUS_FINGERS)
+
+dynamic_config_sysfs(grip_suppression_enabled, DC_GRIP_SUPPRESSION_ENABLED)
+
+dynamic_config_sysfs(enable_thick_glove, DC_ENABLE_THICK_GLOVE)
+
+dynamic_config_sysfs(enable_glove, DC_ENABLE_GLOVE)
+
+/*
+ * syna_tcm_add_module() - register or unregister a feature module.
+ *
+ * @mod_cb: the module's callback table
+ * @insert: true to add the module to the pool, false to mark the module of
+ *          the same type for detachment
+ *
+ * Modules are not initialized/removed here; they are only queued (via the
+ * insert/detach flags) and the pool work item performs the actual init or
+ * remove in process context.  Lazily initializes the pool on first use.
+ * Returns 0 on success or -ENOMEM if the handler allocation fails.
+ */
+int syna_tcm_add_module(struct syna_tcm_module_cb *mod_cb, bool insert)
+{
+	struct syna_tcm_module_handler *mod_handler;
+
+	/* first caller sets up the pool lock and list */
+	if (!mod_pool.initialized) {
+		mutex_init(&mod_pool.mutex);
+		INIT_LIST_HEAD(&mod_pool.list);
+		mod_pool.initialized = true;
+	}
+
+	mutex_lock(&mod_pool.mutex);
+
+	if (insert) {
+		mod_handler = kzalloc(sizeof(*mod_handler), GFP_KERNEL);
+		if (!mod_handler) {
+			mutex_unlock(&mod_pool.mutex);
+			return -ENOMEM;
+		}
+		mod_handler->mod_cb = mod_cb;
+		mod_handler->insert = true;
+		mod_handler->detach = false;
+		list_add_tail(&mod_handler->link, &mod_pool.list);
+	} else if (!list_empty(&mod_pool.list)) {
+		/* mark the matching module; the work item frees the handler */
+		list_for_each_entry(mod_handler, &mod_pool.list, link) {
+			if (mod_handler->mod_cb->type == mod_cb->type) {
+				mod_handler->insert = false;
+				mod_handler->detach = true;
+				goto exit;
+			}
+		}
+	}
+
+exit:
+	mutex_unlock(&mod_pool.mutex);
+
+	/* kick the pool work only once the core has set up the workqueue */
+	if (mod_pool.queue_work)
+		queue_work(mod_pool.workqueue, &mod_pool.work);
+
+	return 0;
+}
+EXPORT_SYMBOL(syna_tcm_add_module);
+
+/*
+ * syna_tcm_module_work() - process pending module insertions and removals.
+ *
+ * Walks the module pool and, for each handler, calls init() for newly
+ * inserted modules and remove() (then frees the handler) for detached ones.
+ * Sets mod_pool.reconstructing around the walk so report dispatch can avoid
+ * delivering touch reports while modules are in flux.
+ */
+static void syna_tcm_module_work(struct work_struct *work)
+{
+	struct syna_tcm_module_handler *mod_handler;
+	struct syna_tcm_module_handler *tmp_handler;
+	struct syna_tcm_hcd *tcm_hcd = mod_pool.tcm_hcd;
+
+	mutex_lock(&mod_pool.mutex);
+	mod_pool.reconstructing = true;
+
+	if (!list_empty(&mod_pool.list)) {
+		/* _safe variant: detached entries are deleted while walking */
+		list_for_each_entry_safe(mod_handler,
+				tmp_handler,
+				&mod_pool.list,
+				link) {
+			if (mod_handler->insert) {
+				if (mod_handler->mod_cb->init)
+					mod_handler->mod_cb->init(tcm_hcd);
+				mod_handler->insert = false;
+			}
+			if (mod_handler->detach) {
+				if (mod_handler->mod_cb->remove)
+					mod_handler->mod_cb->remove(tcm_hcd);
+				list_del(&mod_handler->link);
+				kfree(mod_handler);
+			}
+		}
+	}
+
+	mod_pool.reconstructing = false;
+	mutex_unlock(&mod_pool.mutex);
+}
+
+/**
+ * syna_tcm_report_notifier() - notify occurrence of report received from device
+ *
+ * @data: handle of core module
+ *
+ * The occurrence of the report generated by the device is forwarded to the
+ * asynchronous inbox of each registered application module.  Runs as a
+ * dedicated kthread at SCHED_RR priority; it sleeps until woken by
+ * syna_tcm_dispatch_report() and exits when the thread is stopped.
+ */
+static int syna_tcm_report_notifier(void *data)
+{
+	struct sched_param param = { .sched_priority = NOTIFIER_PRIORITY };
+	struct syna_tcm_module_handler *mod_handler;
+	struct syna_tcm_hcd *tcm_hcd = data;
+
+	sched_setscheduler_nocheck(current, SCHED_RR, &param);
+
+	/* mark interruptible before the loop test to avoid a missed wakeup */
+	set_current_state(TASK_INTERRUPTIBLE);
+
+	while (!kthread_should_stop()) {
+		schedule();
+
+		if (kthread_should_stop())
+			break;
+
+		set_current_state(TASK_RUNNING);
+
+		mutex_lock(&mod_pool.mutex);
+		mod_pool.reconstructing = true;
+
+		if (!list_empty(&mod_pool.list)) {
+			/* deliver only to fully attached modules */
+			list_for_each_entry(mod_handler, &mod_pool.list, link) {
+				if (!mod_handler->insert &&
+						!mod_handler->detach &&
+						(mod_handler->mod_cb->asyncbox))
+					mod_handler->mod_cb->asyncbox(tcm_hcd);
+			}
+		}
+
+		mod_pool.reconstructing = false;
+		mutex_unlock(&mod_pool.mutex);
+
+		set_current_state(TASK_INTERRUPTIBLE);
+	}
+
+	return 0;
+}
+
+/**
+ * syna_tcm_dispatch_report() - dispatch report received from device
+ *
+ * @tcm_hcd: handle of core module
+ *
+ * The report generated by the device is forwarded to the synchronous inbox of
+ * each registered application module for further processing. In addition, the
+ * report notifier thread is woken up for asynchronous notification of the
+ * report occurrence.
+ */
+static void syna_tcm_dispatch_report(struct syna_tcm_hcd *tcm_hcd)
+{
+	struct syna_tcm_module_handler *mod_handler;
+
+	LOCK_BUFFER(tcm_hcd->in);
+	LOCK_BUFFER(tcm_hcd->report.buffer);
+
+	/* report.buffer aliases the payload inside in.buf (no copy); both
+	 * buffers stay locked for the duration of the synchronous dispatch
+	 */
+	tcm_hcd->report.buffer.buf = &tcm_hcd->in.buf[MESSAGE_HEADER_SIZE];
+
+	tcm_hcd->report.buffer.buf_size = tcm_hcd->in.buf_size;
+	tcm_hcd->report.buffer.buf_size -= MESSAGE_HEADER_SIZE;
+
+	tcm_hcd->report.buffer.data_length = tcm_hcd->payload_length;
+
+	tcm_hcd->report.id = tcm_hcd->status_report_code;
+
+	mutex_lock(&mod_pool.mutex);
+
+	if (!list_empty(&mod_pool.list)) {
+		/* skip modules that are mid-insert or marked for detach */
+		list_for_each_entry(mod_handler, &mod_pool.list, link) {
+			if (!mod_handler->insert &&
+					!mod_handler->detach &&
+					(mod_handler->mod_cb->syncbox))
+				mod_handler->mod_cb->syncbox(tcm_hcd);
+		}
+	}
+
+	tcm_hcd->async_report_id = tcm_hcd->status_report_code;
+
+	mutex_unlock(&mod_pool.mutex);
+
+	UNLOCK_BUFFER(tcm_hcd->report.buffer);
+	UNLOCK_BUFFER(tcm_hcd->in);
+
+	/* asynchronous path: wake the notifier kthread */
+	wake_up_process(tcm_hcd->notifier_thread);
+}
+
+/**
+ * syna_tcm_dispatch_response() - dispatch response received from device
+ *
+ * @tcm_hcd: handle of core module
+ *
+ * The response to a command is forwarded to the sender of the command.
+ * The payload, if any, is copied out of the shared input buffer into
+ * tcm_hcd->resp, the command status is updated (CMD_IDLE on success,
+ * CMD_ERROR on failure), and response_complete is signaled.
+ */
+static void syna_tcm_dispatch_response(struct syna_tcm_hcd *tcm_hcd)
+{
+	int retval;
+
+	/* ignore unsolicited responses */
+	if (atomic_read(&tcm_hcd->command_status) != CMD_BUSY)
+		return;
+
+	tcm_hcd->response_code = tcm_hcd->status_report_code;
+
+	/* no payload: nothing to copy, just release the waiter */
+	if (tcm_hcd->payload_length == 0) {
+		atomic_set(&tcm_hcd->command_status, CMD_IDLE);
+		goto exit;
+	}
+
+	LOCK_BUFFER(tcm_hcd->resp);
+
+	retval = syna_tcm_alloc_mem(tcm_hcd,
+			&tcm_hcd->resp,
+			tcm_hcd->payload_length);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+			"Failed to allocate memory for tcm_hcd->resp.buf\n");
+		UNLOCK_BUFFER(tcm_hcd->resp);
+		atomic_set(&tcm_hcd->command_status, CMD_ERROR);
+		goto exit;
+	}
+
+	LOCK_BUFFER(tcm_hcd->in);
+
+	/* copy the payload that follows the 4-byte message header */
+	retval = secure_memcpy(tcm_hcd->resp.buf,
+			tcm_hcd->resp.buf_size,
+			&tcm_hcd->in.buf[MESSAGE_HEADER_SIZE],
+			tcm_hcd->in.buf_size - MESSAGE_HEADER_SIZE,
+			tcm_hcd->payload_length);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to copy payload\n");
+		UNLOCK_BUFFER(tcm_hcd->in);
+		UNLOCK_BUFFER(tcm_hcd->resp);
+		atomic_set(&tcm_hcd->command_status, CMD_ERROR);
+		goto exit;
+	}
+
+	tcm_hcd->resp.data_length = tcm_hcd->payload_length;
+
+	UNLOCK_BUFFER(tcm_hcd->in);
+	UNLOCK_BUFFER(tcm_hcd->resp);
+
+	atomic_set(&tcm_hcd->command_status, CMD_IDLE);
+
+exit:
+	complete(&response_complete);
+}
+
+/**
+ * syna_tcm_dispatch_message() - dispatch message received from device
+ *
+ * @tcm_hcd: handle of core module
+ *
+ * The information received in the message read in from the device is dispatched
+ * to the appropriate destination based on whether the information represents a
+ * report or a response to a command.  An identify report additionally updates
+ * the cached identification info, packrat number and write chunk size, and may
+ * complete an outstanding command (the device sends identify after a reset).
+ */
+static void syna_tcm_dispatch_message(struct syna_tcm_hcd *tcm_hcd)
+{
+	int retval;
+	unsigned char *build_id;
+	unsigned int payload_length;
+	unsigned int max_write_size;
+
+	if (tcm_hcd->status_report_code == REPORT_IDENTIFY) {
+		payload_length = tcm_hcd->payload_length;
+
+		LOCK_BUFFER(tcm_hcd->in);
+
+		/* cache the identification block; bounded by both the struct
+		 * size and the actual payload length
+		 */
+		retval = secure_memcpy((unsigned char *)&tcm_hcd->id_info,
+				sizeof(tcm_hcd->id_info),
+				&tcm_hcd->in.buf[MESSAGE_HEADER_SIZE],
+				tcm_hcd->in.buf_size - MESSAGE_HEADER_SIZE,
+				MIN(sizeof(tcm_hcd->id_info), payload_length));
+		if (retval < 0) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+					"Failed to copy identification info\n");
+			UNLOCK_BUFFER(tcm_hcd->in);
+			return;
+		}
+
+		UNLOCK_BUFFER(tcm_hcd->in);
+
+		/* build_id is a 4-byte little-endian firmware build number */
+		build_id = tcm_hcd->id_info.build_id;
+		tcm_hcd->packrat_number = le4_to_uint(build_id);
+
+		/* clamp the write chunk to what the device advertises;
+		 * 0 from MIN() means "unlimited", so use the device maximum
+		 */
+		max_write_size = le2_to_uint(tcm_hcd->id_info.max_write_size);
+		tcm_hcd->wr_chunk_size = MIN(max_write_size, WR_CHUNK_SIZE);
+		if (tcm_hcd->wr_chunk_size == 0)
+			tcm_hcd->wr_chunk_size = max_write_size;
+
+		LOGD(tcm_hcd->pdev->dev.parent,
+			"Received identify report (firmware mode = 0x%02x)\n",
+			tcm_hcd->id_info.mode);
+
+		/* identify doubles as the response to reset/mode-switch
+		 * commands; any other in-flight command was killed by the
+		 * unexpected reset
+		 */
+		if (atomic_read(&tcm_hcd->command_status) == CMD_BUSY) {
+			switch (tcm_hcd->command) {
+			case CMD_RESET:
+			case CMD_RUN_BOOTLOADER_FIRMWARE:
+			case CMD_RUN_APPLICATION_FIRMWARE:
+			case CMD_ENTER_PRODUCTION_TEST_MODE:
+				tcm_hcd->response_code = STATUS_OK;
+				atomic_set(&tcm_hcd->command_status, CMD_IDLE);
+				complete(&response_complete);
+				break;
+			default:
+				LOGN(tcm_hcd->pdev->dev.parent,
+						"Device has been reset\n");
+				atomic_set(&tcm_hcd->command_status, CMD_ERROR);
+				complete(&response_complete);
+				break;
+			}
+		}
+
+		if (tcm_hcd->id_info.mode == MODE_HOST_DOWNLOAD) {
+			tcm_hcd->host_download_mode = true;
+			return;
+		}
+
+#ifdef FORCE_RUN_APPLICATION_FIRMWARE
+		if (tcm_hcd->id_info.mode != MODE_APPLICATION &&
+				!mutex_is_locked(&tcm_hcd->reset_mutex)) {
+			if (atomic_read(&tcm_hcd->helper.task) == HELP_NONE) {
+				atomic_set(&tcm_hcd->helper.task,
+						HELP_RUN_APPLICATION_FIRMWARE);
+				queue_work(tcm_hcd->helper.workqueue,
+						&tcm_hcd->helper.work);
+				return;
+			}
+		}
+#endif
+	}
+
+	/* codes at or above REPORT_IDENTIFY are reports; lower codes are
+	 * command responses.  Touch reports are dropped while the module
+	 * pool is being reconstructed.
+	 */
+	if (tcm_hcd->status_report_code >= REPORT_IDENTIFY) {
+		if ((mod_pool.reconstructing)
+			&& (tcm_hcd->status_report_code == REPORT_TOUCH))
+			return;
+		syna_tcm_dispatch_report(tcm_hcd);
+
+	} else
+		syna_tcm_dispatch_response(tcm_hcd);
+
+}
+
+/**
+ * syna_tcm_continued_read() - retrieve entire payload from device
+ *
+ * @tcm_hcd: handle of core module
+ *
+ * Read transactions are carried out until the entire payload is retrieved from
+ * the device and stored in the handle of the core module.  Each continued
+ * transfer re-sends a 2-byte header (marker + STATUS_CONTINUED_READ code)
+ * ahead of the payload chunk, which is why only (chunk size - 2) payload
+ * bytes fit per transfer.
+ */
+static int syna_tcm_continued_read(struct syna_tcm_hcd *tcm_hcd)
+{
+	int retval;
+	unsigned char marker;
+	unsigned char code;
+	unsigned int idx;
+	unsigned int offset;
+	unsigned int chunks;
+	unsigned int chunk_space;
+	unsigned int xfer_length;
+	unsigned int total_length;
+	unsigned int remaining_length;
+
+	/* header + payload + 1 trailing padding byte */
+	total_length = MESSAGE_HEADER_SIZE + tcm_hcd->payload_length + 1;
+
+	/* the first tcm_hcd->read_length bytes were already read by caller */
+	remaining_length = total_length - tcm_hcd->read_length;
+
+	LOCK_BUFFER(tcm_hcd->in);
+
+	retval = syna_tcm_realloc_mem(tcm_hcd,
+			&tcm_hcd->in,
+			total_length + 1);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+			"Failed to reallocate memory for tcm_hcd->in.buf\n");
+		UNLOCK_BUFFER(tcm_hcd->in);
+		return retval;
+	}
+
+	/**
+	 * available chunk space for payload = total chunk size minus header
+	 * marker byte and header code byte
+	 */
+	if (tcm_hcd->rd_chunk_size == 0)
+		chunk_space = remaining_length;
+	else
+		chunk_space = tcm_hcd->rd_chunk_size - 2;
+
+	chunks = ceil_div(remaining_length, chunk_space);
+
+	chunks = chunks == 0 ? 1 : chunks;
+
+	offset = tcm_hcd->read_length;
+
+	LOCK_BUFFER(tcm_hcd->temp);
+
+	for (idx = 0; idx < chunks; idx++) {
+		if (remaining_length > chunk_space)
+			xfer_length = chunk_space;
+		else
+			xfer_length = remaining_length;
+
+		/* a 1-byte remainder can only be the trailing padding byte;
+		 * fill it in without issuing a bus read
+		 */
+		if (xfer_length == 1) {
+			tcm_hcd->in.buf[offset] = MESSAGE_PADDING;
+			offset += xfer_length;
+			remaining_length -= xfer_length;
+			continue;
+		}
+
+		/* +2 for the re-sent marker and code bytes */
+		retval = syna_tcm_alloc_mem(tcm_hcd,
+				&tcm_hcd->temp,
+				xfer_length + 2);
+		if (retval < 0) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to allocate memory for temp.buf\n");
+			UNLOCK_BUFFER(tcm_hcd->temp);
+			UNLOCK_BUFFER(tcm_hcd->in);
+			return retval;
+		}
+
+		retval = syna_tcm_read(tcm_hcd,
+				tcm_hcd->temp.buf,
+				xfer_length + 2);
+		if (retval < 0) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+					"Failed to read from device\n");
+			UNLOCK_BUFFER(tcm_hcd->temp);
+			UNLOCK_BUFFER(tcm_hcd->in);
+			return retval;
+		}
+
+		marker = tcm_hcd->temp.buf[0];
+		code = tcm_hcd->temp.buf[1];
+
+		if (marker != MESSAGE_MARKER) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+					"Incorrect header marker (0x%02x)\n",
+					marker);
+			UNLOCK_BUFFER(tcm_hcd->temp);
+			UNLOCK_BUFFER(tcm_hcd->in);
+			return -EIO;
+		}
+
+		if (code != STATUS_CONTINUED_READ) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+					"Incorrect header code (0x%02x)\n",
+					code);
+			UNLOCK_BUFFER(tcm_hcd->temp);
+			UNLOCK_BUFFER(tcm_hcd->in);
+			return -EIO;
+		}
+
+		/* append the chunk payload (skipping the 2 header bytes) */
+		retval = secure_memcpy(&tcm_hcd->in.buf[offset],
+				tcm_hcd->in.buf_size - offset,
+				&tcm_hcd->temp.buf[2],
+				tcm_hcd->temp.buf_size - 2,
+				xfer_length);
+		if (retval < 0) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+					"Failed to copy payload\n");
+			UNLOCK_BUFFER(tcm_hcd->temp);
+			UNLOCK_BUFFER(tcm_hcd->in);
+			return retval;
+		}
+
+		offset += xfer_length;
+
+		remaining_length -= xfer_length;
+	}
+
+	UNLOCK_BUFFER(tcm_hcd->temp);
+	UNLOCK_BUFFER(tcm_hcd->in);
+
+	return 0;
+}
+
+/**
+ * syna_tcm_raw_read() - retrieve specific number of data bytes from device
+ *
+ * @tcm_hcd: handle of core module
+ * @in_buf: buffer for storing data retrieved from device
+ * @length: number of bytes to retrieve from device
+ *
+ * Read transactions are carried out until the specific number of data bytes
+ * are retrieved from the device and stored in in_buf.  Unlike
+ * syna_tcm_continued_read(), the first chunk's 2-byte header is kept in
+ * in_buf; subsequent chunks keep only their payload.
+ */
+static int syna_tcm_raw_read(struct syna_tcm_hcd *tcm_hcd,
+		unsigned char *in_buf, unsigned int length)
+{
+	int retval;
+	unsigned char code;
+	unsigned int idx;
+	unsigned int offset;
+	unsigned int chunks;
+	unsigned int chunk_space;
+	unsigned int xfer_length;
+	unsigned int remaining_length;
+
+	/* need room for at least the marker and code bytes */
+	if (length < 2) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Invalid length information\n");
+		return -EINVAL;
+	}
+
+	/* minus header marker byte and header code byte */
+	remaining_length = length - 2;
+
+	/**
+	 * available chunk space for data = total chunk size minus header
+	 * marker byte and header code byte
+	 */
+	if (tcm_hcd->rd_chunk_size == 0)
+		chunk_space = remaining_length;
+	else
+		chunk_space = tcm_hcd->rd_chunk_size - 2;
+
+	chunks = ceil_div(remaining_length, chunk_space);
+
+	chunks = chunks == 0 ? 1 : chunks;
+
+	offset = 0;
+
+	LOCK_BUFFER(tcm_hcd->temp);
+
+	for (idx = 0; idx < chunks; idx++) {
+		if (remaining_length > chunk_space)
+			xfer_length = chunk_space;
+		else
+			xfer_length = remaining_length;
+
+		/* 1-byte remainder is assumed to be the padding byte;
+		 * synthesize it rather than issuing a bus read
+		 */
+		if (xfer_length == 1) {
+			in_buf[offset] = MESSAGE_PADDING;
+			offset += xfer_length;
+			remaining_length -= xfer_length;
+			continue;
+		}
+
+		retval = syna_tcm_alloc_mem(tcm_hcd,
+				&tcm_hcd->temp,
+				xfer_length + 2);
+		if (retval < 0) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to allocate memory for temp.buf\n");
+			UNLOCK_BUFFER(tcm_hcd->temp);
+			return retval;
+		}
+
+		retval = syna_tcm_read(tcm_hcd,
+				tcm_hcd->temp.buf,
+				xfer_length + 2);
+		if (retval < 0) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+					"Failed to read from device\n");
+			UNLOCK_BUFFER(tcm_hcd->temp);
+			return retval;
+		}
+
+		code = tcm_hcd->temp.buf[1];
+
+		if (idx == 0) {
+			/* first chunk: keep marker and code in the output */
+			retval = secure_memcpy(&in_buf[0],
+					length,
+					&tcm_hcd->temp.buf[0],
+					tcm_hcd->temp.buf_size,
+					xfer_length + 2);
+		} else {
+			if (code != STATUS_CONTINUED_READ) {
+				LOGE(tcm_hcd->pdev->dev.parent,
+					"Incorrect header code (0x%02x)\n",
+					code);
+				UNLOCK_BUFFER(tcm_hcd->temp);
+				return -EIO;
+			}
+
+			/* later chunks: drop the re-sent 2-byte header */
+			retval = secure_memcpy(&in_buf[offset],
+					length - offset,
+					&tcm_hcd->temp.buf[2],
+					tcm_hcd->temp.buf_size - 2,
+					xfer_length);
+		}
+		if (retval < 0) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+					"Failed to copy data\n");
+			UNLOCK_BUFFER(tcm_hcd->temp);
+			return retval;
+		}
+
+		if (idx == 0)
+			offset += (xfer_length + 2);
+		else
+			offset += xfer_length;
+
+		remaining_length -= xfer_length;
+	}
+
+	UNLOCK_BUFFER(tcm_hcd->temp);
+
+	return 0;
+}
+
+/**
+ * syna_tcm_raw_write() - write command/data to device without receiving
+ * response
+ *
+ * @tcm_hcd: handle of core module
+ * @command: command to send to device
+ * @data: data to send to device
+ * @length: length of data in bytes
+ *
+ * A command and its data, if any, are sent to the device.  The first chunk
+ * carries @command in its leading byte; every subsequent chunk carries
+ * CMD_CONTINUE_WRITE instead, which is why only (chunk size - 1) data bytes
+ * fit per transfer.
+ */
+static int syna_tcm_raw_write(struct syna_tcm_hcd *tcm_hcd,
+	unsigned char command, unsigned char *data, unsigned int length)
+{
+	int retval;
+	unsigned int idx;
+	unsigned int chunks;
+	unsigned int chunk_space;
+	unsigned int xfer_length;
+	unsigned int remaining_length;
+
+	remaining_length = length;
+
+	/**
+	 * available chunk space for data = total chunk size minus command
+	 * byte
+	 */
+	if (tcm_hcd->wr_chunk_size == 0)
+		chunk_space = remaining_length;
+	else
+		chunk_space = tcm_hcd->wr_chunk_size - 1;
+
+	chunks = ceil_div(remaining_length, chunk_space);
+
+	/* a command with no data still requires one transfer */
+	chunks = chunks == 0 ? 1 : chunks;
+
+	LOCK_BUFFER(tcm_hcd->out);
+
+	for (idx = 0; idx < chunks; idx++) {
+		if (remaining_length > chunk_space)
+			xfer_length = chunk_space;
+		else
+			xfer_length = remaining_length;
+
+		/* +1 for the leading command byte */
+		retval = syna_tcm_alloc_mem(tcm_hcd,
+				&tcm_hcd->out,
+				xfer_length + 1);
+		if (retval < 0) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to allocate memory for out.buf\n");
+			UNLOCK_BUFFER(tcm_hcd->out);
+			return retval;
+		}
+
+		if (idx == 0)
+			tcm_hcd->out.buf[0] = command;
+		else
+			tcm_hcd->out.buf[0] = CMD_CONTINUE_WRITE;
+
+		if (xfer_length) {
+			retval = secure_memcpy(&tcm_hcd->out.buf[1],
+					tcm_hcd->out.buf_size - 1,
+					&data[idx * chunk_space],
+					remaining_length,
+					xfer_length);
+			if (retval < 0) {
+				LOGE(tcm_hcd->pdev->dev.parent,
+						"Failed to copy data\n");
+				UNLOCK_BUFFER(tcm_hcd->out);
+				return retval;
+			}
+		}
+
+		retval = syna_tcm_write(tcm_hcd,
+				tcm_hcd->out.buf,
+				xfer_length + 1);
+		if (retval < 0) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+					"Failed to write to device\n");
+			UNLOCK_BUFFER(tcm_hcd->out);
+			return retval;
+		}
+
+		remaining_length -= xfer_length;
+	}
+
+	UNLOCK_BUFFER(tcm_hcd->out);
+
+	return 0;
+}
+
+/**
+ * syna_tcm_read_message() - read message from device
+ *
+ * @tcm_hcd: handle of core module
+ * @in_buf: buffer for storing data in raw read mode
+ * @length: length of data in bytes in raw read mode
+ *
+ * If in_buf is not NULL, raw read mode is used and syna_tcm_raw_read() is
+ * called. Otherwise, a message including its entire payload is retrieved from
+ * the device and dispatched to the appropriate destination.  A failed or
+ * inconsistent read is retried once after a short delay.  On error while a
+ * command is in flight, the command status is set to CMD_ERROR and the
+ * waiter is released.
+ */
+static int syna_tcm_read_message(struct syna_tcm_hcd *tcm_hcd,
+		unsigned char *in_buf, unsigned int length)
+{
+	int retval;
+	bool retry;
+	unsigned int total_length;
+	struct syna_tcm_message_header *header;
+
+	mutex_lock(&tcm_hcd->rw_ctrl_mutex);
+
+	if (in_buf != NULL) {
+		retval = syna_tcm_raw_read(tcm_hcd, in_buf, length);
+		goto exit;
+	}
+
+	retry = true;
+
+retry:
+	LOCK_BUFFER(tcm_hcd->in);
+
+	retval = syna_tcm_read(tcm_hcd,
+			tcm_hcd->in.buf,
+			tcm_hcd->read_length);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to read from device\n");
+		UNLOCK_BUFFER(tcm_hcd->in);
+		if (retry) {
+			usleep_range(READ_RETRY_US_MIN, READ_RETRY_US_MAX);
+			retry = false;
+			goto retry;
+		}
+		goto exit;
+	}
+
+	header = (struct syna_tcm_message_header *)tcm_hcd->in.buf;
+
+	if (header->marker != MESSAGE_MARKER) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Incorrect header marker (0x%02x)\n",
+				header->marker);
+		UNLOCK_BUFFER(tcm_hcd->in);
+		retval = -ENXIO;
+		if (retry) {
+			usleep_range(READ_RETRY_US_MIN, READ_RETRY_US_MAX);
+			retry = false;
+			goto retry;
+		}
+		goto exit;
+	}
+
+	tcm_hcd->status_report_code = header->code;
+
+	tcm_hcd->payload_length = le2_to_uint(header->length);
+
+	LOGD(tcm_hcd->pdev->dev.parent,
+			"Header code = 0x%02x\n",
+			tcm_hcd->status_report_code);
+
+	LOGD(tcm_hcd->pdev->dev.parent,
+			"Payload length = %d\n",
+			tcm_hcd->payload_length);
+
+	if (tcm_hcd->status_report_code <= STATUS_ERROR ||
+			tcm_hcd->status_report_code == STATUS_INVALID) {
+		switch (tcm_hcd->status_report_code) {
+		case STATUS_OK:
+			break;
+		case STATUS_CONTINUED_READ:
+			LOGD(tcm_hcd->pdev->dev.parent,
+					"Out-of-sync continued read\n");
+			/* fall through */
+		case STATUS_IDLE:
+		case STATUS_BUSY:
+			/* nothing to dispatch for these status codes */
+			tcm_hcd->payload_length = 0;
+			UNLOCK_BUFFER(tcm_hcd->in);
+			retval = 0;
+			goto exit;
+		default:
+			LOGE(tcm_hcd->pdev->dev.parent,
+					"Incorrect header code (0x%02x)\n",
+					tcm_hcd->status_report_code);
+			if (tcm_hcd->status_report_code == STATUS_INVALID) {
+				if (retry) {
+					usleep_range(READ_RETRY_US_MIN,
+							READ_RETRY_US_MAX);
+					retry = false;
+					goto retry;
+				} else {
+					tcm_hcd->payload_length = 0;
+				}
+			}
+		}
+	}
+
+	/* header + payload + 1 trailing padding byte */
+	total_length = MESSAGE_HEADER_SIZE + tcm_hcd->payload_length + 1;
+
+#ifdef PREDICTIVE_READING
+	/* if the speculative first read already covered the whole message
+	 * (possibly short of only the padding byte), skip the continued read
+	 */
+	if (total_length <= tcm_hcd->read_length) {
+		goto check_padding;
+	} else if (total_length - 1 == tcm_hcd->read_length) {
+		tcm_hcd->in.buf[total_length - 1] = MESSAGE_PADDING;
+		goto check_padding;
+	}
+#else
+	if (tcm_hcd->payload_length == 0) {
+		tcm_hcd->in.buf[total_length - 1] = MESSAGE_PADDING;
+		goto check_padding;
+	}
+#endif
+
+	UNLOCK_BUFFER(tcm_hcd->in);
+
+	retval = syna_tcm_continued_read(tcm_hcd);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to do continued read\n");
+		goto exit;
+	}
+
+	LOCK_BUFFER(tcm_hcd->in);
+
+	/* rebuild the message header at the front of the assembled buffer */
+	tcm_hcd->in.buf[0] = MESSAGE_MARKER;
+	tcm_hcd->in.buf[1] = tcm_hcd->status_report_code;
+	tcm_hcd->in.buf[2] = (unsigned char)tcm_hcd->payload_length;
+	tcm_hcd->in.buf[3] = (unsigned char)(tcm_hcd->payload_length >> 8);
+
+check_padding:
+	if (tcm_hcd->in.buf[total_length - 1] != MESSAGE_PADDING) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Incorrect message padding byte (0x%02x)\n",
+				tcm_hcd->in.buf[total_length - 1]);
+		UNLOCK_BUFFER(tcm_hcd->in);
+		retval = -EIO;
+		goto exit;
+	}
+
+	UNLOCK_BUFFER(tcm_hcd->in);
+
+#ifdef PREDICTIVE_READING
+	/* size the next speculative read from this message's length,
+	 * bounded below by the minimum and above by the read chunk size
+	 */
+	total_length = MAX(total_length, MIN_READ_LENGTH);
+	tcm_hcd->read_length = MIN(total_length, tcm_hcd->rd_chunk_size);
+	if (tcm_hcd->rd_chunk_size == 0)
+		tcm_hcd->read_length = total_length;
+#endif
+
+	mutex_unlock(&tcm_hcd->rw_ctrl_mutex);
+
+	syna_tcm_dispatch_message(tcm_hcd);
+
+	retval = 0;
+
+	return retval;
+
+exit:
+	if (retval < 0) {
+		/* a read failure kills any in-flight command */
+		if (atomic_read(&tcm_hcd->command_status) == CMD_BUSY) {
+			atomic_set(&tcm_hcd->command_status, CMD_ERROR);
+			complete(&response_complete);
+		}
+	}
+
+	mutex_unlock(&tcm_hcd->rw_ctrl_mutex);
+
+	return retval;
+}
+
+/**
+ * syna_tcm_write_message() - write message to device and receive response
+ *
+ * @tcm_hcd: handle of core module
+ * @command: command to send to device
+ * @payload: payload of command
+ * @length: length of payload in bytes
+ * @resp_buf: buffer for storing command response
+ * @resp_buf_size: size of response buffer in bytes
+ * @resp_length: length of command response in bytes
+ * @response_code: status code returned in command response
+ * @polling_delay_ms: delay time after sending command before resuming polling
+ *
+ * If resp_buf is NULL, raw write mode is used and syna_tcm_raw_write() is
+ * called. Otherwise, a command and its payload, if any, are sent to the device
+ * and the response to the command generated by the device is read in.
+ */
+static int syna_tcm_write_message(struct syna_tcm_hcd *tcm_hcd,
+		unsigned char command, unsigned char *payload,
+		unsigned int length, unsigned char **resp_buf,
+		unsigned int *resp_buf_size, unsigned int *resp_length,
+		unsigned char *response_code, unsigned int polling_delay_ms)
+{
+	int retval;
+	unsigned int idx;
+	unsigned int chunks;
+	unsigned int chunk_space;
+	unsigned int xfer_length;
+	unsigned int remaining_length;
+	unsigned int command_status;
+
+	if (response_code != NULL)
+		*response_code = STATUS_INVALID;
+
+	/* waiting for a response from inside the ISR thread would deadlock,
+	 * since the same thread is the one that delivers the response */
+	if (!tcm_hcd->do_polling && current->pid == tcm_hcd->isr_pid) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Invalid execution context\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&tcm_hcd->command_mutex);
+
+	mutex_lock(&tcm_hcd->rw_ctrl_mutex);
+
+	/* raw write mode: send the bytes and skip all response handling */
+	if (resp_buf == NULL) {
+		retval = syna_tcm_raw_write(tcm_hcd, command, payload, length);
+		mutex_unlock(&tcm_hcd->rw_ctrl_mutex);
+		goto exit;
+	}
+
+	/* pause polling so it does not consume the command response */
+	if (tcm_hcd->do_polling && polling_delay_ms) {
+		cancel_delayed_work_sync(&tcm_hcd->polling_work);
+		flush_workqueue(tcm_hcd->polling_workqueue);
+	}
+
+	atomic_set(&tcm_hcd->command_status, CMD_BUSY);
+
+	reinit_completion(&response_complete);
+
+	tcm_hcd->command = command;
+
+	LOCK_BUFFER(tcm_hcd->resp);
+
+	/* hand the caller's buffer to the read path; it may be reallocated
+	 * and is handed back through *resp_buf below */
+	tcm_hcd->resp.buf = *resp_buf;
+	tcm_hcd->resp.buf_size = *resp_buf_size;
+	tcm_hcd->resp.data_length = 0;
+
+	UNLOCK_BUFFER(tcm_hcd->resp);
+
+	/* adding two length bytes as part of payload */
+	remaining_length = length + 2;
+
+	/**
+	 * available chunk space for payload = total chunk size minus command
+	 * byte
+	 */
+	if (tcm_hcd->wr_chunk_size == 0)
+		chunk_space = remaining_length;
+	else
+		chunk_space = tcm_hcd->wr_chunk_size - 1;
+
+	chunks = ceil_div(remaining_length, chunk_space);
+
+	/* always send at least the command byte, even with no payload */
+	chunks = chunks == 0 ? 1 : chunks;
+
+	LOGD(tcm_hcd->pdev->dev.parent,
+			"Command = 0x%02x\n",
+			command);
+
+	LOCK_BUFFER(tcm_hcd->out);
+
+	for (idx = 0; idx < chunks; idx++) {
+		if (remaining_length > chunk_space)
+			xfer_length = chunk_space;
+		else
+			xfer_length = remaining_length;
+
+		retval = syna_tcm_alloc_mem(tcm_hcd,
+				&tcm_hcd->out,
+				xfer_length + 1);
+		if (retval < 0) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to allocate memory for out.buf\n");
+			UNLOCK_BUFFER(tcm_hcd->out);
+			mutex_unlock(&tcm_hcd->rw_ctrl_mutex);
+			goto exit;
+		}
+
+		if (idx == 0) {
+			/* first chunk: command byte plus 16-bit LE payload
+			 * length, then as much payload as fits */
+			tcm_hcd->out.buf[0] = command;
+			tcm_hcd->out.buf[1] = (unsigned char)length;
+			tcm_hcd->out.buf[2] = (unsigned char)(length >> 8);
+
+			if (xfer_length > 2) {
+				retval = secure_memcpy(&tcm_hcd->out.buf[3],
+						tcm_hcd->out.buf_size - 3,
+						payload,
+						remaining_length - 2,
+						xfer_length - 2);
+				if (retval < 0) {
+					LOGE(tcm_hcd->pdev->dev.parent,
+						"Failed to copy payload\n");
+					UNLOCK_BUFFER(tcm_hcd->out);
+					mutex_unlock(&tcm_hcd->rw_ctrl_mutex);
+					goto exit;
+				}
+			}
+		} else {
+			tcm_hcd->out.buf[0] = CMD_CONTINUE_WRITE;
+
+			/* the first chunk consumed 2 length bytes instead of
+			 * payload, hence the -2 offset into payload here */
+			retval = secure_memcpy(&tcm_hcd->out.buf[1],
+					tcm_hcd->out.buf_size - 1,
+					&payload[idx * chunk_space - 2],
+					remaining_length,
+					xfer_length);
+			if (retval < 0) {
+				LOGE(tcm_hcd->pdev->dev.parent,
+						"Failed to copy payload\n");
+				UNLOCK_BUFFER(tcm_hcd->out);
+				mutex_unlock(&tcm_hcd->rw_ctrl_mutex);
+				goto exit;
+			}
+		}
+
+		retval = syna_tcm_write(tcm_hcd,
+				tcm_hcd->out.buf,
+				xfer_length + 1);
+		if (retval < 0) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+					"Failed to write to device\n");
+			UNLOCK_BUFFER(tcm_hcd->out);
+			mutex_unlock(&tcm_hcd->rw_ctrl_mutex);
+			goto exit;
+		}
+
+		remaining_length -= xfer_length;
+
+		/* give the device time to absorb each continued chunk */
+		if (chunks > 1)
+			usleep_range(WRITE_DELAY_US_MIN, WRITE_DELAY_US_MAX);
+	}
+
+	UNLOCK_BUFFER(tcm_hcd->out);
+
+	mutex_unlock(&tcm_hcd->rw_ctrl_mutex);
+
+	/* resume polling so the response can actually be picked up */
+	if (tcm_hcd->do_polling && polling_delay_ms) {
+		queue_delayed_work(tcm_hcd->polling_workqueue,
+				&tcm_hcd->polling_work,
+				msecs_to_jiffies(polling_delay_ms));
+	}
+
+	/* response_complete is signalled by the read path (ISR or polling)
+	 * once the matching response message has been dispatched */
+	retval = wait_for_completion_timeout(&response_complete,
+			msecs_to_jiffies(RESPONSE_TIMEOUT_MS));
+	if (retval == 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+			"Timed out waiting for response (command 0x%02x)\n",
+			tcm_hcd->command);
+		retval = -EIO;
+		goto exit;
+	}
+
+	command_status = atomic_read(&tcm_hcd->command_status);
+	if (command_status != CMD_IDLE) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+			"Failed to get valid response (command 0x%02x)\n",
+			tcm_hcd->command);
+		retval = -EIO;
+		goto exit;
+	}
+
+	LOCK_BUFFER(tcm_hcd->resp);
+
+	if (tcm_hcd->response_code != STATUS_OK) {
+		if (tcm_hcd->resp.data_length) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+				"Error code = 0x%02x (command 0x%02x)\n",
+				tcm_hcd->resp.buf[0], tcm_hcd->command);
+		}
+		retval = -EIO;
+	} else {
+		retval = 0;
+	}
+
+	/* hand the (possibly reallocated) response buffer back to caller */
+	*resp_buf = tcm_hcd->resp.buf;
+	*resp_buf_size = tcm_hcd->resp.buf_size;
+	*resp_length = tcm_hcd->resp.data_length;
+
+	if (response_code != NULL)
+		*response_code = tcm_hcd->response_code;
+
+	UNLOCK_BUFFER(tcm_hcd->resp);
+
+exit:
+	tcm_hcd->command = CMD_NONE;
+
+	atomic_set(&tcm_hcd->command_status, CMD_IDLE);
+
+	mutex_unlock(&tcm_hcd->command_mutex);
+
+	return retval;
+}
+
+/**
+ * syna_tcm_wait_hdl() - wait for host download (HDL) to finish
+ *
+ * @tcm_hcd: handle of core module
+ *
+ * Sleep briefly, then block on hdl_wq until host_downloading is cleared or
+ * the timeout expires.  Returns 0 on completion, -EIO on timeout.
+ */
+static int syna_tcm_wait_hdl(struct syna_tcm_hcd *tcm_hcd)
+{
+	int retval;
+
+	msleep(HOST_DOWNLOAD_WAIT_MS);
+
+	/* fast path: download already completed */
+	if (!atomic_read(&tcm_hcd->host_downloading))
+		return 0;
+
+	retval = wait_event_interruptible_timeout(tcm_hcd->hdl_wq,
+			!atomic_read(&tcm_hcd->host_downloading),
+			msecs_to_jiffies(HOST_DOWNLOAD_TIMEOUT_MS));
+	if (retval == 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+			"Timed out waiting for completion of host download\n");
+		/* clear the stale flag so subsequent waits do not block */
+		atomic_set(&tcm_hcd->host_downloading, 0);
+		retval = -EIO;
+	} else {
+		/* NOTE(review): a negative return (interrupted by signal)
+		 * is mapped to success here — confirm this is intended */
+		retval = 0;
+	}
+
+	return retval;
+}
+
+/**
+ * syna_tcm_check_hdl() - notify modules that a host download is requested
+ *
+ * @tcm_hcd: handle of core module
+ *
+ * Publish an empty REPORT_HDL report and invoke the syncbox callback of
+ * every attached module so one of them can perform the firmware download.
+ */
+static void syna_tcm_check_hdl(struct syna_tcm_hcd *tcm_hcd)
+{
+	struct syna_tcm_module_handler *mod_handler;
+
+	LOCK_BUFFER(tcm_hcd->report.buffer);
+
+	/* no payload: the report id alone signals the HDL request */
+	tcm_hcd->report.buffer.buf = NULL;
+	tcm_hcd->report.buffer.buf_size = 0;
+	tcm_hcd->report.buffer.data_length = 0;
+	tcm_hcd->report.id = REPORT_HDL;
+
+	UNLOCK_BUFFER(tcm_hcd->report.buffer);
+
+	mutex_lock(&mod_pool.mutex);
+
+	if (!list_empty(&mod_pool.list)) {
+		list_for_each_entry(mod_handler, &mod_pool.list, link) {
+			/* skip modules pending insertion or removal */
+			if (!mod_handler->insert &&
+					!mod_handler->detach &&
+					(mod_handler->mod_cb->syncbox))
+				mod_handler->mod_cb->syncbox(tcm_hcd);
+		}
+	}
+
+	mutex_unlock(&mod_pool.mutex);
+}
+
+/**
+ * syna_tcm_update_watchdog() - stop or (re)start the watchdog work
+ *
+ * @tcm_hcd: handle of core module
+ * @en: true to re-arm the watchdog, false to leave it stopped
+ *
+ * Any pending watchdog work is always cancelled first; it is re-queued
+ * only if watchdog.run is set and @en is true.
+ */
+static void syna_tcm_update_watchdog(struct syna_tcm_hcd *tcm_hcd, bool en)
+{
+	cancel_delayed_work_sync(&tcm_hcd->watchdog.work);
+	flush_workqueue(tcm_hcd->watchdog.workqueue);
+
+	if (!tcm_hcd->watchdog.run) {
+		tcm_hcd->watchdog.count = 0;
+		return;
+	}
+
+	if (en) {
+		queue_delayed_work(tcm_hcd->watchdog.workqueue,
+				&tcm_hcd->watchdog.work,
+				msecs_to_jiffies(WATCHDOG_DELAY_MS));
+	} else {
+		/* watchdog paused: reset the failure counter */
+		tcm_hcd->watchdog.count = 0;
+	}
+}
+
+/**
+ * syna_tcm_watchdog_work() - periodic device responsiveness check
+ *
+ * @work: work_struct embedded in the watchdog's delayed work
+ *
+ * Read one marker byte from the device; after WATCHDOG_TRIGGER_COUNT
+ * consecutive failures, reset the device.  Always re-queues itself.
+ */
+static void syna_tcm_watchdog_work(struct work_struct *work)
+{
+	int retval;
+	struct delayed_work *delayed_work =
+			container_of(work, struct delayed_work, work);
+	struct syna_tcm_watchdog *watchdog =
+			container_of(delayed_work, struct syna_tcm_watchdog,
+			work);
+	struct syna_tcm_hcd *tcm_hcd =
+			container_of(watchdog, struct syna_tcm_hcd, watchdog);
+
+	/*
+	 * Skip this cycle if a bus transaction is in progress.  Using
+	 * mutex_trylock() closes the race window of the former
+	 * mutex_is_locked() check followed by mutex_lock(), where the
+	 * mutex could be taken between the check and the lock.
+	 */
+	if (!mutex_trylock(&tcm_hcd->rw_ctrl_mutex))
+		goto exit;
+
+	retval = syna_tcm_read(tcm_hcd,
+			&tcm_hcd->marker,
+			1);
+
+	mutex_unlock(&tcm_hcd->rw_ctrl_mutex);
+
+	if (retval < 0 || tcm_hcd->marker != MESSAGE_MARKER) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to read from device\n");
+
+		tcm_hcd->watchdog.count++;
+
+		/* only reset after repeated consecutive failures */
+		if (tcm_hcd->watchdog.count >= WATCHDOG_TRIGGER_COUNT) {
+			retval = tcm_hcd->reset(tcm_hcd, true, false);
+			if (retval < 0) {
+				LOGE(tcm_hcd->pdev->dev.parent,
+						"Failed to do reset\n");
+			}
+			tcm_hcd->watchdog.count = 0;
+		}
+	}
+
+exit:
+	queue_delayed_work(tcm_hcd->watchdog.workqueue,
+			&tcm_hcd->watchdog.work,
+			msecs_to_jiffies(WATCHDOG_DELAY_MS));
+}
+
+/**
+ * syna_tcm_polling_work() - poll the device for messages
+ *
+ * @work: work_struct embedded in polling_work
+ *
+ * Fallback message pump used when the interrupt line is unavailable.
+ * Re-queues itself unless the device is suspended and the read failed.
+ */
+static void syna_tcm_polling_work(struct work_struct *work)
+{
+	int retval;
+	struct delayed_work *delayed_work =
+			container_of(work, struct delayed_work, work);
+	struct syna_tcm_hcd *tcm_hcd =
+			container_of(delayed_work, struct syna_tcm_hcd,
+			polling_work);
+
+	if (!tcm_hcd->do_polling)
+		return;
+
+	retval = tcm_hcd->read_message(tcm_hcd,
+			NULL,
+			0);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to read message\n");
+		/* a bad marker over SPI may mean the device rebooted into
+		 * the bootloader and wants a host download */
+		if (retval == -ENXIO && tcm_hcd->hw_if->bus_io->type == BUS_SPI)
+			syna_tcm_check_hdl(tcm_hcd);
+	}
+
+	if (!(tcm_hcd->in_suspend && retval < 0)) {
+		queue_delayed_work(tcm_hcd->polling_workqueue,
+				&tcm_hcd->polling_work,
+				msecs_to_jiffies(POLLING_DELAY_MS));
+	}
+}
+
+/**
+ * syna_tcm_isr() - threaded interrupt handler
+ *
+ * @irq: interrupt number
+ * @data: pointer to the core module handle
+ *
+ * Read and dispatch one message from the device.  Always returns
+ * IRQ_HANDLED.
+ */
+static irqreturn_t syna_tcm_isr(int irq, void *data)
+{
+	int retval;
+	struct syna_tcm_hcd *tcm_hcd = data;
+	const struct syna_tcm_board_data *bdata = tcm_hcd->hw_if->bdata;
+
+	/* ignore spurious interrupts: line not in its asserted state */
+	if (unlikely(gpio_get_value(bdata->irq_gpio) != bdata->irq_on_state))
+		goto exit;
+
+	/* recorded so write_message() can refuse to block in this thread */
+	tcm_hcd->isr_pid = current->pid;
+
+	retval = tcm_hcd->read_message(tcm_hcd,
+			NULL,
+			0);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent, "Failed to read message\n");
+		/* bad marker over SPI may indicate a host download request */
+		if (retval == -ENXIO &&
+			tcm_hcd->hw_if->bus_io->type == BUS_SPI)
+			syna_tcm_check_hdl(tcm_hcd);
+	}
+
+exit:
+	return IRQ_HANDLED;
+}
+
+/**
+ * syna_tcm_enable_irq() - enable or disable the attention interrupt
+ *
+ * @tcm_hcd: handle of core module
+ * @en: true to enable the interrupt, false to disable it
+ * @ns: on disable, true for the nosync variant (IRQ only masked, not
+ *      freed; pending work cancelled without waiting)
+ *
+ * On the first enable the IRQ is requested; later enables just unmask it.
+ * If the interrupt cannot be set up and FALL_BACK_ON_POLLING is defined,
+ * polling mode is used instead.  Returns 0 on success or a negative
+ * errno.
+ */
+static int syna_tcm_enable_irq(struct syna_tcm_hcd *tcm_hcd, bool en, bool ns)
+{
+	int retval;
+	const struct syna_tcm_board_data *bdata = tcm_hcd->hw_if->bdata;
+	/* tracks whether the IRQ must be (re-)requested vs just unmasked */
+	static bool irq_freed = true;
+
+	mutex_lock(&tcm_hcd->irq_en_mutex);
+
+	if (en) {
+		if (tcm_hcd->irq_enabled) {
+			LOGD(tcm_hcd->pdev->dev.parent,
+					"Interrupt already enabled\n");
+			retval = 0;
+			goto exit;
+		}
+
+		if (bdata->irq_gpio < 0) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+					"Invalid IRQ GPIO\n");
+			retval = -EINVAL;
+			goto queue_polling_work;
+		}
+
+		if (irq_freed) {
+			retval = request_threaded_irq(tcm_hcd->irq, NULL,
+					syna_tcm_isr, bdata->irq_flags,
+					PLATFORM_DRIVER_NAME, tcm_hcd);
+			if (retval < 0) {
+				LOGE(tcm_hcd->pdev->dev.parent,
+					"Failed to create interrupt thread\n");
+			}
+		} else {
+			enable_irq(tcm_hcd->irq);
+			retval = 0;
+		}
+
+queue_polling_work:
+		/* interrupt unavailable: optionally fall back to polling */
+		if (retval < 0) {
+#ifdef FALL_BACK_ON_POLLING
+			queue_delayed_work(tcm_hcd->polling_workqueue,
+					&tcm_hcd->polling_work,
+					msecs_to_jiffies(POLLING_DELAY_MS));
+			tcm_hcd->do_polling = true;
+			retval = 0;
+#endif
+		}
+
+		if (retval < 0)
+			goto exit;
+		else
+			msleep(ENABLE_IRQ_DELAY_MS);
+	} else {
+		if (!tcm_hcd->irq_enabled) {
+			LOGD(tcm_hcd->pdev->dev.parent,
+					"Interrupt already disabled\n");
+			retval = 0;
+			goto exit;
+		}
+
+		if (bdata->irq_gpio >= 0) {
+			if (ns) {
+				disable_irq_nosync(tcm_hcd->irq);
+			} else {
+				disable_irq(tcm_hcd->irq);
+				free_irq(tcm_hcd->irq, tcm_hcd);
+			}
+			irq_freed = !ns;
+		}
+
+		/* stop polling too; sync variants wait for completion */
+		if (ns) {
+			cancel_delayed_work(&tcm_hcd->polling_work);
+		} else {
+			cancel_delayed_work_sync(&tcm_hcd->polling_work);
+			flush_workqueue(tcm_hcd->polling_workqueue);
+		}
+
+		tcm_hcd->do_polling = false;
+	}
+
+	retval = 0;
+
+exit:
+	/* only record the new state if the transition succeeded */
+	if (retval == 0)
+		tcm_hcd->irq_enabled = en;
+
+	mutex_unlock(&tcm_hcd->irq_en_mutex);
+
+	return retval;
+}
+
+/**
+ * syna_tcm_set_gpio() - request/configure or release a GPIO line
+ *
+ * @tcm_hcd: handle of core module
+ * @gpio: GPIO number
+ * @config: true to request and configure the line, false to free it
+ * @dir: 0 for input, nonzero for output (only used when @config is true)
+ * @state: initial output level (only used for outputs)
+ *
+ * Returns 0 on success or a negative errno; on failure the line is not
+ * left requested.
+ */
+static int syna_tcm_set_gpio(struct syna_tcm_hcd *tcm_hcd, int gpio,
+		bool config, int dir, int state)
+{
+	int retval;
+	char label[16];
+
+	if (config) {
+		/* no trailing newline: the label shows up in gpio/sysfs
+		 * listings, where an embedded '\n' garbles the output */
+		retval = snprintf(label, sizeof(label), "tcm_gpio_%d", gpio);
+		/* snprintf returns the would-be length on truncation, so
+		 * check the upper bound too, not just for negative */
+		if (retval < 0 || retval >= (int)sizeof(label)) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+					"Failed to set GPIO label\n");
+			return retval < 0 ? retval : -EINVAL;
+		}
+
+		retval = gpio_request(gpio, label);
+		if (retval < 0) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+					"Failed to request GPIO %d\n",
+					gpio);
+			return retval;
+		}
+
+		if (dir == 0)
+			retval = gpio_direction_input(gpio);
+		else
+			retval = gpio_direction_output(gpio, state);
+		if (retval < 0) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+					"Failed to set GPIO %d direction\n",
+					gpio);
+			/* release the line so it is not leaked and a
+			 * retry can request it again */
+			gpio_free(gpio);
+			return retval;
+		}
+	} else {
+		gpio_free(gpio);
+	}
+
+	return 0;
+}
+
+/**
+ * syna_tcm_config_gpio() - configure IRQ, power and reset GPIOs
+ *
+ * @tcm_hcd: handle of core module
+ *
+ * Request each board-specified GPIO (negative numbers mean "not used"),
+ * then power the device on and pulse the reset line.  On error, GPIOs
+ * requested so far are released in reverse order.
+ */
+static int syna_tcm_config_gpio(struct syna_tcm_hcd *tcm_hcd)
+{
+	int retval;
+	const struct syna_tcm_board_data *bdata = tcm_hcd->hw_if->bdata;
+
+	if (bdata->irq_gpio >= 0) {
+		retval = syna_tcm_set_gpio(tcm_hcd, bdata->irq_gpio,
+				true, 0, 0);
+		if (retval < 0) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to configure interrupt GPIO\n");
+			goto err_set_gpio_irq;
+		}
+	}
+
+	if (bdata->power_gpio >= 0) {
+		/* start with power de-asserted */
+		retval = syna_tcm_set_gpio(tcm_hcd, bdata->power_gpio,
+				true, 1, !bdata->power_on_state);
+		if (retval < 0) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to configure power GPIO\n");
+			goto err_set_gpio_power;
+		}
+	}
+
+	if (bdata->reset_gpio >= 0) {
+		/* start with reset de-asserted */
+		retval = syna_tcm_set_gpio(tcm_hcd, bdata->reset_gpio,
+				true, 1, !bdata->reset_on_state);
+		if (retval < 0) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to configure reset GPIO\n");
+			goto err_set_gpio_reset;
+		}
+	}
+
+	/* power on, then pulse reset with board-specified timings */
+	if (bdata->power_gpio >= 0) {
+		gpio_set_value(bdata->power_gpio, bdata->power_on_state);
+		msleep(bdata->power_delay_ms);
+	}
+
+	if (bdata->reset_gpio >= 0) {
+		gpio_set_value(bdata->reset_gpio, bdata->reset_on_state);
+		msleep(bdata->reset_active_ms);
+		gpio_set_value(bdata->reset_gpio, !bdata->reset_on_state);
+		msleep(bdata->reset_delay_ms);
+	}
+
+	return 0;
+
+err_set_gpio_reset:
+	if (bdata->power_gpio >= 0)
+		syna_tcm_set_gpio(tcm_hcd, bdata->power_gpio, false, 0, 0);
+
+err_set_gpio_power:
+	if (bdata->irq_gpio >= 0)
+		syna_tcm_set_gpio(tcm_hcd, bdata->irq_gpio, false, 0, 0);
+
+err_set_gpio_irq:
+	return retval;
+}
+
+/**
+ * syna_tcm_enable_regulator() - enable or disable bus and power supplies
+ *
+ * @tcm_hcd: handle of core module
+ * @en: true to enable both regulators, false to disable them
+ *
+ * Enables the bus regulator first, then the power regulator (waiting
+ * power_delay_ms after power-up).  On failure, anything already enabled
+ * is disabled again.  Returns 0 on success or a negative errno.
+ */
+static int syna_tcm_enable_regulator(struct syna_tcm_hcd *tcm_hcd, bool en)
+{
+	int retval;
+	const struct syna_tcm_board_data *bdata = tcm_hcd->hw_if->bdata;
+
+	if (!en) {
+		retval = 0;
+		/* disable in reverse order of enabling */
+		goto disable_pwr_reg;
+	}
+
+	if (tcm_hcd->bus_reg) {
+		retval = regulator_enable(tcm_hcd->bus_reg);
+		if (retval < 0) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to enable bus regulator\n");
+			goto exit;
+		}
+	}
+
+	if (tcm_hcd->pwr_reg) {
+		retval = regulator_enable(tcm_hcd->pwr_reg);
+		if (retval < 0) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+					"Failed to enable power regulator\n");
+			goto disable_bus_reg;
+		}
+		/* allow the supply to settle before talking to the chip */
+		msleep(bdata->power_delay_ms);
+	}
+
+	return 0;
+
+disable_pwr_reg:
+	if (tcm_hcd->pwr_reg)
+		regulator_disable(tcm_hcd->pwr_reg);
+
+disable_bus_reg:
+	if (tcm_hcd->bus_reg)
+		regulator_disable(tcm_hcd->bus_reg);
+
+exit:
+	return retval;
+}
+
+/**
+ * syna_tcm_get_regulator() - acquire or release regulator handles
+ *
+ * @tcm_hcd: handle of core module
+ * @get: true to look up the board-named regulators, false to put them
+ *
+ * Regulator names are optional; empty or NULL names are skipped.
+ * Returns 0 on success or the regulator_get() error.
+ */
+static int syna_tcm_get_regulator(struct syna_tcm_hcd *tcm_hcd, bool get)
+{
+	int retval;
+	const struct syna_tcm_board_data *bdata = tcm_hcd->hw_if->bdata;
+
+	if (!get) {
+		retval = 0;
+		goto regulator_put;
+	}
+
+	if (bdata->bus_reg_name != NULL && *bdata->bus_reg_name != 0) {
+		tcm_hcd->bus_reg = regulator_get(tcm_hcd->pdev->dev.parent,
+				bdata->bus_reg_name);
+		if (IS_ERR(tcm_hcd->bus_reg)) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+					"Failed to get bus regulator\n");
+			retval = PTR_ERR(tcm_hcd->bus_reg);
+			goto regulator_put;
+		}
+	}
+
+	if (bdata->pwr_reg_name != NULL && *bdata->pwr_reg_name != 0) {
+		tcm_hcd->pwr_reg = regulator_get(tcm_hcd->pdev->dev.parent,
+				bdata->pwr_reg_name);
+		if (IS_ERR(tcm_hcd->pwr_reg)) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+					"Failed to get power regulator\n");
+			retval = PTR_ERR(tcm_hcd->pwr_reg);
+			goto regulator_put;
+		}
+	}
+
+	return 0;
+
+regulator_put:
+	/* NOTE(review): on the error path a field may hold an ERR_PTR when
+	 * handed to regulator_put(); regulator_put() tolerates that, but
+	 * clearing the field before the jump would be cleaner — confirm */
+	if (tcm_hcd->bus_reg) {
+		regulator_put(tcm_hcd->bus_reg);
+		tcm_hcd->bus_reg = NULL;
+	}
+
+	if (tcm_hcd->pwr_reg) {
+		regulator_put(tcm_hcd->pwr_reg);
+		tcm_hcd->pwr_reg = NULL;
+	}
+
+	return retval;
+}
+
+/**
+ * syna_tcm_get_app_info() - read application info from the device
+ *
+ * @tcm_hcd: handle of core module
+ *
+ * Issue CMD_GET_APPLICATION_INFO and cache the result in app_info /
+ * app_status.  While the application reports BOOTING or UPDATING, the
+ * command is retried every APP_STATUS_POLL_MS up to
+ * APP_STATUS_POLL_TIMEOUT_MS.  Returns 0 on success or a negative errno.
+ */
+static int syna_tcm_get_app_info(struct syna_tcm_hcd *tcm_hcd)
+{
+	int retval;
+	unsigned char *resp_buf;
+	unsigned int resp_buf_size;
+	unsigned int resp_length;
+	unsigned int timeout;
+
+	timeout = APP_STATUS_POLL_TIMEOUT_MS;
+
+	resp_buf = NULL;
+	resp_buf_size = 0;
+
+get_app_info:
+	retval = tcm_hcd->write_message(tcm_hcd,
+			CMD_GET_APPLICATION_INFO,
+			NULL,
+			0,
+			&resp_buf,
+			&resp_buf_size,
+			&resp_length,
+			NULL,
+			0);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to write command %s\n",
+				STR(CMD_GET_APPLICATION_INFO));
+		goto exit;
+	}
+
+	/* copy no more than the device actually returned */
+	retval = secure_memcpy((unsigned char *)&tcm_hcd->app_info,
+			sizeof(tcm_hcd->app_info),
+			resp_buf,
+			resp_buf_size,
+			MIN(sizeof(tcm_hcd->app_info), resp_length));
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to copy application info\n");
+		goto exit;
+	}
+
+	tcm_hcd->app_status = le2_to_uint(tcm_hcd->app_info.status);
+
+	if (tcm_hcd->app_status == APP_STATUS_BOOTING ||
+			tcm_hcd->app_status == APP_STATUS_UPDATING) {
+		/*
+		 * timeout is unsigned; require a full poll interval to be
+		 * left before subtracting, otherwise the subtraction could
+		 * wrap around and defeat the poll time bound.
+		 */
+		if (timeout >= APP_STATUS_POLL_MS) {
+			msleep(APP_STATUS_POLL_MS);
+			timeout -= APP_STATUS_POLL_MS;
+			goto get_app_info;
+		}
+	}
+
+	retval = 0;
+
+exit:
+	kfree(resp_buf);
+
+	return retval;
+}
+
+/**
+ * syna_tcm_get_boot_info() - read bootloader info from the device
+ *
+ * @tcm_hcd: handle of core module
+ *
+ * Issue CMD_GET_BOOT_INFO and cache the result in boot_info.  Returns 0
+ * on success or a negative errno.
+ */
+static int syna_tcm_get_boot_info(struct syna_tcm_hcd *tcm_hcd)
+{
+	int retval;
+	unsigned char *resp_buf;
+	unsigned int resp_buf_size;
+	unsigned int resp_length;
+
+	resp_buf = NULL;
+	resp_buf_size = 0;
+
+	retval = tcm_hcd->write_message(tcm_hcd,
+			CMD_GET_BOOT_INFO,
+			NULL,
+			0,
+			&resp_buf,
+			&resp_buf_size,
+			&resp_length,
+			NULL,
+			0);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to write command %s\n",
+				STR(CMD_GET_BOOT_INFO));
+		goto exit;
+	}
+
+	/* bounded copy: never read past the response actually received */
+	retval = secure_memcpy((unsigned char *)&tcm_hcd->boot_info,
+			sizeof(tcm_hcd->boot_info),
+			resp_buf,
+			resp_buf_size,
+			MIN(sizeof(tcm_hcd->boot_info), resp_length));
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to copy boot info\n");
+		goto exit;
+	}
+
+	retval = 0;
+
+exit:
+	kfree(resp_buf);
+
+	return retval;
+}
+
+/**
+ * syna_tcm_identify() - identify the device and fetch mode-specific info
+ *
+ * @tcm_hcd: handle of core module
+ * @id: true to issue CMD_IDENTIFY first; false to reuse the cached
+ *      id_info and only refresh the mode-specific info
+ *
+ * Updates id_info, packrat_number and wr_chunk_size, then fetches either
+ * application or bootloader info depending on the reported mode.
+ * Returns 0 on success or a negative errno.
+ */
+static int syna_tcm_identify(struct syna_tcm_hcd *tcm_hcd, bool id)
+{
+	int retval;
+	unsigned char *resp_buf;
+	unsigned int resp_buf_size;
+	unsigned int resp_length;
+	unsigned int max_write_size;
+
+	resp_buf = NULL;
+	resp_buf_size = 0;
+
+	mutex_lock(&tcm_hcd->identify_mutex);
+
+	if (!id)
+		goto get_info;
+
+	retval = tcm_hcd->write_message(tcm_hcd,
+			CMD_IDENTIFY,
+			NULL,
+			0,
+			&resp_buf,
+			&resp_buf_size,
+			&resp_length,
+			NULL,
+			0);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to write command %s\n",
+				STR(CMD_IDENTIFY));
+		goto exit;
+	}
+
+	/* bounded copy of the identification block */
+	retval = secure_memcpy((unsigned char *)&tcm_hcd->id_info,
+			sizeof(tcm_hcd->id_info),
+			resp_buf,
+			resp_buf_size,
+			MIN(sizeof(tcm_hcd->id_info), resp_length));
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to copy identification info\n");
+		goto exit;
+	}
+
+	tcm_hcd->packrat_number = le4_to_uint(tcm_hcd->id_info.build_id);
+
+	/* clamp write chunking to what the device advertises;
+	 * 0 means "unlimited" on either side */
+	max_write_size = le2_to_uint(tcm_hcd->id_info.max_write_size);
+	tcm_hcd->wr_chunk_size = MIN(max_write_size, WR_CHUNK_SIZE);
+	if (tcm_hcd->wr_chunk_size == 0)
+		tcm_hcd->wr_chunk_size = max_write_size;
+
+get_info:
+	switch (tcm_hcd->id_info.mode) {
+	case MODE_APPLICATION:
+		retval = syna_tcm_get_app_info(tcm_hcd);
+		if (retval < 0) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+					"Failed to get application info\n");
+			goto exit;
+		}
+		break;
+	case MODE_BOOTLOADER:
+	case MODE_TDDI_BOOTLOADER:
+		retval = syna_tcm_get_boot_info(tcm_hcd);
+		if (retval < 0) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+					"Failed to get boot info\n");
+			goto exit;
+		}
+		break;
+	default:
+		break;
+	}
+
+	retval = 0;
+
+exit:
+	mutex_unlock(&tcm_hcd->identify_mutex);
+
+	kfree(resp_buf);
+
+	return retval;
+}
+
+/**
+ * syna_tcm_run_production_test_firmware() - switch to production test mode
+ *
+ * @tcm_hcd: handle of core module
+ *
+ * Issue CMD_ENTER_PRODUCTION_TEST_MODE (retried once if the mode switch
+ * does not take effect).  Returns 0 on success or a negative errno.
+ */
+static int syna_tcm_run_production_test_firmware(struct syna_tcm_hcd *tcm_hcd)
+{
+	int retval;
+	bool retry;
+	unsigned char *resp_buf;
+	unsigned int resp_buf_size;
+	unsigned int resp_length;
+
+	retry = true;
+
+	resp_buf = NULL;
+	resp_buf_size = 0;
+
+retry:
+	retval = tcm_hcd->write_message(tcm_hcd,
+			CMD_ENTER_PRODUCTION_TEST_MODE,
+			NULL,
+			0,
+			&resp_buf,
+			&resp_buf_size,
+			&resp_length,
+			NULL,
+			MODE_SWITCH_DELAY_MS);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to write command %s\n",
+				STR(CMD_ENTER_PRODUCTION_TEST_MODE));
+		goto exit;
+	}
+
+	/* id_info.mode is refreshed by the identify report dispatched
+	 * during the mode switch */
+	if (tcm_hcd->id_info.mode != MODE_PRODUCTION_TEST) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to run production test firmware\n");
+		if (retry) {
+			retry = false;
+			goto retry;
+		}
+		retval = -EINVAL;
+		goto exit;
+	} else if (tcm_hcd->app_status != APP_STATUS_OK) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Application status = 0x%02x\n",
+				tcm_hcd->app_status);
+	}
+
+	retval = 0;
+
+exit:
+	kfree(resp_buf);
+
+	return retval;
+}
+
+/**
+ * syna_tcm_run_application_firmware() - switch to application firmware
+ *
+ * @tcm_hcd: handle of core module
+ *
+ * Issue CMD_RUN_APPLICATION_FIRMWARE, re-identify, and verify that the
+ * device entered application mode (retried once).  Returns 0 on success
+ * or a negative errno.
+ */
+static int syna_tcm_run_application_firmware(struct syna_tcm_hcd *tcm_hcd)
+{
+	int retval;
+	bool retry;
+	unsigned char *resp_buf;
+	unsigned int resp_buf_size;
+	unsigned int resp_length;
+
+	retry = true;
+
+	resp_buf = NULL;
+	resp_buf_size = 0;
+
+retry:
+	retval = tcm_hcd->write_message(tcm_hcd,
+			CMD_RUN_APPLICATION_FIRMWARE,
+			NULL,
+			0,
+			&resp_buf,
+			&resp_buf_size,
+			&resp_length,
+			NULL,
+			MODE_SWITCH_DELAY_MS);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to write command %s\n",
+				STR(CMD_RUN_APPLICATION_FIRMWARE));
+		goto exit;
+	}
+
+	retval = tcm_hcd->identify(tcm_hcd, false);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to do identification\n");
+		goto exit;
+	}
+
+	if (tcm_hcd->id_info.mode != MODE_APPLICATION) {
+		/* still in bootloader: report its status for diagnosis */
+		LOGE(tcm_hcd->pdev->dev.parent,
+			"Failed to run application (status = 0x%02x)\n",
+			tcm_hcd->boot_info.status);
+		if (retry) {
+			retry = false;
+			goto retry;
+		}
+		retval = -EINVAL;
+		goto exit;
+	} else if (tcm_hcd->app_status != APP_STATUS_OK) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Application status = 0x%02x\n",
+				tcm_hcd->app_status);
+	}
+
+	retval = 0;
+
+exit:
+	kfree(resp_buf);
+
+	return retval;
+}
+
+/**
+ * syna_tcm_run_bootloader_firmware() - switch to bootloader firmware
+ *
+ * @tcm_hcd: handle of core module
+ *
+ * Issue CMD_RUN_BOOTLOADER_FIRMWARE, re-identify, and verify that the
+ * device left application mode.  Returns 0 on success or a negative
+ * errno.
+ */
+static int syna_tcm_run_bootloader_firmware(struct syna_tcm_hcd *tcm_hcd)
+{
+	int retval;
+	unsigned char *resp_buf;
+	unsigned int resp_buf_size;
+	unsigned int resp_length;
+
+	resp_buf = NULL;
+	resp_buf_size = 0;
+
+	retval = tcm_hcd->write_message(tcm_hcd,
+			CMD_RUN_BOOTLOADER_FIRMWARE,
+			NULL,
+			0,
+			&resp_buf,
+			&resp_buf_size,
+			&resp_length,
+			NULL,
+			MODE_SWITCH_DELAY_MS);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to write command %s\n",
+				STR(CMD_RUN_BOOTLOADER_FIRMWARE));
+		goto exit;
+	}
+
+	retval = tcm_hcd->identify(tcm_hcd, false);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to do identification\n");
+		goto exit;
+	}
+
+	/* still reporting application mode means the switch failed */
+	if (tcm_hcd->id_info.mode == MODE_APPLICATION) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to enter bootloader mode\n");
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	retval = 0;
+
+exit:
+	kfree(resp_buf);
+
+	return retval;
+}
+
+/**
+ * syna_tcm_switch_mode() - switch the device to a given firmware mode
+ *
+ * @tcm_hcd: handle of core module
+ * @mode: target firmware mode (bootloader, application or production
+ *        test)
+ *
+ * The watchdog is paused around the switch so it cannot reset the device
+ * mid-transition.  Returns 0 on success or a negative errno.
+ */
+static int syna_tcm_switch_mode(struct syna_tcm_hcd *tcm_hcd,
+		enum firmware_mode mode)
+{
+	int retval;
+
+	mutex_lock(&tcm_hcd->reset_mutex);
+
+	tcm_hcd->update_watchdog(tcm_hcd, false);
+
+	switch (mode) {
+	case FW_MODE_BOOTLOADER:
+		retval = syna_tcm_run_bootloader_firmware(tcm_hcd);
+		if (retval < 0) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to switch to bootloader mode\n");
+			goto exit;
+		}
+		break;
+	case FW_MODE_APPLICATION:
+		retval = syna_tcm_run_application_firmware(tcm_hcd);
+		if (retval < 0) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to switch to application mode\n");
+			goto exit;
+		}
+		break;
+	case FW_MODE_PRODUCTION_TEST:
+		retval = syna_tcm_run_production_test_firmware(tcm_hcd);
+		if (retval < 0) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to switch to production test mode\n");
+			goto exit;
+		}
+		break;
+	default:
+		LOGE(tcm_hcd->pdev->dev.parent, "Invalid firmware mode\n");
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	retval = 0;
+
+exit:
+	/* re-arm the watchdog regardless of the switch outcome */
+	tcm_hcd->update_watchdog(tcm_hcd, true);
+
+	mutex_unlock(&tcm_hcd->reset_mutex);
+
+	return retval;
+}
+
+/**
+ * syna_tcm_get_dynamic_config() - read a 16-bit dynamic config value
+ *
+ * @tcm_hcd: handle of core module
+ * @id: dynamic config field to read
+ * @value: output; little-endian value decoded from the response
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int syna_tcm_get_dynamic_config(struct syna_tcm_hcd *tcm_hcd,
+		enum dynamic_config_id id, unsigned short *value)
+{
+	int retval;
+	unsigned char out_buf;
+	unsigned char *resp_buf;
+	unsigned int resp_buf_size;
+	unsigned int resp_length;
+
+	resp_buf = NULL;
+	resp_buf_size = 0;
+
+	out_buf = (unsigned char)id;
+
+	retval = tcm_hcd->write_message(tcm_hcd,
+			CMD_GET_DYNAMIC_CONFIG,
+			&out_buf,
+			sizeof(out_buf),
+			&resp_buf,
+			&resp_buf_size,
+			&resp_length,
+			NULL,
+			0);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to write command %s\n",
+				STR(CMD_GET_DYNAMIC_CONFIG));
+		goto exit;
+	}
+
+	/* bounds check: le2_to_uint() below reads two response bytes */
+	if (resp_length < 2) {
+		LOGE(tcm_hcd->pdev->dev.parent, "Invalid data length\n");
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	*value = (unsigned short)le2_to_uint(resp_buf);
+
+	retval = 0;
+
+exit:
+	kfree(resp_buf);
+
+	return retval;
+}
+
+/**
+ * syna_tcm_set_dynamic_config() - write a 16-bit dynamic config value
+ *
+ * @tcm_hcd: handle of core module
+ * @id: dynamic config field to write
+ * @value: value to set, sent little-endian
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int syna_tcm_set_dynamic_config(struct syna_tcm_hcd *tcm_hcd,
+		enum dynamic_config_id id, unsigned short value)
+{
+	int retval;
+	unsigned char out_buf[3];
+	unsigned char *resp_buf;
+	unsigned int resp_buf_size;
+	unsigned int resp_length;
+
+	resp_buf = NULL;
+	resp_buf_size = 0;
+
+	/* payload: field id followed by the value in little-endian order */
+	out_buf[0] = (unsigned char)id;
+	out_buf[1] = (unsigned char)value;
+	out_buf[2] = (unsigned char)(value >> 8);
+
+	retval = tcm_hcd->write_message(tcm_hcd,
+			CMD_SET_DYNAMIC_CONFIG,
+			out_buf,
+			sizeof(out_buf),
+			&resp_buf,
+			&resp_buf_size,
+			&resp_length,
+			NULL,
+			0);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to write command %s\n",
+				STR(CMD_SET_DYNAMIC_CONFIG));
+		goto exit;
+	}
+
+	retval = 0;
+
+exit:
+	kfree(resp_buf);
+
+	return retval;
+}
+
+/**
+ * syna_tcm_get_data_location() - query address/length of a flash area
+ *
+ * @tcm_hcd: handle of core module
+ * @area: flash area of interest (LCM, OEM or PPDT data)
+ * @addr: output; area address decoded from the response
+ * @length: output; area length decoded from the response
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int syna_tcm_get_data_location(struct syna_tcm_hcd *tcm_hcd,
+		enum flash_area area, unsigned int *addr, unsigned int *length)
+{
+	int retval;
+	unsigned char out_buf;
+	unsigned char *resp_buf;
+	unsigned int resp_buf_size;
+	unsigned int resp_length;
+
+	/* map the flash area to the on-wire data type code */
+	switch (area) {
+	case CUSTOM_LCM:
+		out_buf = LCM_DATA;
+		break;
+	case CUSTOM_OEM:
+		out_buf = OEM_DATA;
+		break;
+	case PPDT:
+		out_buf = PPDT_DATA;
+		break;
+	default:
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Invalid flash area\n");
+		return -EINVAL;
+	}
+
+	resp_buf = NULL;
+	resp_buf_size = 0;
+
+	retval = tcm_hcd->write_message(tcm_hcd,
+			CMD_GET_DATA_LOCATION,
+			&out_buf,
+			sizeof(out_buf),
+			&resp_buf,
+			&resp_buf_size,
+			&resp_length,
+			NULL,
+			0);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to write command %s\n",
+				STR(CMD_GET_DATA_LOCATION));
+		goto exit;
+	}
+
+	/* bounds check: exactly four bytes are decoded below */
+	if (resp_length != 4) {
+		LOGE(tcm_hcd->pdev->dev.parent, "Invalid data length\n");
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	*addr = le2_to_uint(&resp_buf[0]);
+	*length = le2_to_uint(&resp_buf[2]);
+
+	retval = 0;
+
+exit:
+	kfree(resp_buf);
+
+	return retval;
+}
+
+/**
+ * syna_tcm_sleep() - enter or exit deep sleep
+ *
+ * @tcm_hcd: handle of core module
+ * @en: true to enter deep sleep, false to exit it
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int syna_tcm_sleep(struct syna_tcm_hcd *tcm_hcd, bool en)
+{
+	int retval;
+	unsigned char command;
+	unsigned char *resp_buf;
+	unsigned int resp_buf_size;
+	unsigned int resp_length;
+
+	command = en ? CMD_ENTER_DEEP_SLEEP : CMD_EXIT_DEEP_SLEEP;
+
+	resp_buf = NULL;
+	resp_buf_size = 0;
+
+	retval = tcm_hcd->write_message(tcm_hcd,
+			command,
+			NULL,
+			0,
+			&resp_buf,
+			&resp_buf_size,
+			&resp_length,
+			NULL,
+			0);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to write command %s\n",
+				en ?
+				STR(CMD_ENTER_DEEP_SLEEP) :
+				STR(CMD_EXIT_DEEP_SLEEP));
+		goto exit;
+	}
+
+	retval = 0;
+
+exit:
+	kfree(resp_buf);
+
+	return retval;
+}
+
+/*
+ * syna_tcm_reset() - reset the device and bring it back to application mode
+ * @tcm_hcd: handle to the core device instance
+ * @hw: true = hardware reset via the reset GPIO, false = software CMD_RESET
+ * @update_wd: pause the command watchdog during the reset and re-arm it after
+ *
+ * Serialized by reset_mutex.  After the reset the chip is re-identified; if
+ * it is not running the application firmware, CMD_RUN_APPLICATION_FIRMWARE
+ * is issued and the chip identified again.  On success the feature
+ * description is read back and every resident module's reset callback is
+ * dispatched.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+static int syna_tcm_reset(struct syna_tcm_hcd *tcm_hcd, bool hw, bool update_wd)
+{
+	int retval;
+	unsigned char *resp_buf;
+	unsigned int resp_buf_size;
+	unsigned int resp_length;
+	struct syna_tcm_module_handler *mod_handler;
+	const struct syna_tcm_board_data *bdata = tcm_hcd->hw_if->bdata;
+
+	resp_buf = NULL;
+	resp_buf_size = 0;
+
+	mutex_lock(&tcm_hcd->reset_mutex);
+
+	if (update_wd)
+		tcm_hcd->update_watchdog(tcm_hcd, false);
+
+	if (hw) {
+		if (bdata->reset_gpio < 0) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+					"Hardware reset unavailable\n");
+			retval = -EINVAL;
+			goto exit;
+		}
+		/* pulse the reset line for the board-specified active time */
+		gpio_set_value(bdata->reset_gpio, bdata->reset_on_state);
+		msleep(bdata->reset_active_ms);
+		gpio_set_value(bdata->reset_gpio, !bdata->reset_on_state);
+	} else {
+		retval = tcm_hcd->write_message(tcm_hcd,
+				CMD_RESET,
+				NULL,
+				0,
+				&resp_buf,
+				&resp_buf_size,
+				&resp_length,
+				NULL,
+				bdata->reset_delay_ms);
+		/*
+		 * A failed CMD_RESET is tolerated in host download mode,
+		 * where the chip cannot answer the command.
+		 */
+		if (retval < 0 && !tcm_hcd->host_download_mode) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+					"Failed to write command %s\n",
+					STR(CMD_RESET));
+			goto exit;
+		}
+	}
+
+	if (tcm_hcd->host_download_mode) {
+		/*
+		 * Firmware comes up via host download: wait for the download
+		 * to complete instead of identifying, then return early.
+		 * The mutex is dropped before the (potentially long) wait.
+		 */
+		mutex_unlock(&tcm_hcd->reset_mutex);
+		kfree(resp_buf);
+		retval = syna_tcm_wait_hdl(tcm_hcd);
+		if (retval < 0) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to wait for completion of download\n");
+			return retval;
+		}
+		if (update_wd)
+			tcm_hcd->update_watchdog(tcm_hcd, true);
+		return 0;
+	}
+
+	/* give the firmware time to boot before talking to it */
+	msleep(bdata->reset_delay_ms);
+
+	retval = tcm_hcd->identify(tcm_hcd, false);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to do identification\n");
+		goto exit;
+	}
+
+	if (tcm_hcd->id_info.mode == MODE_APPLICATION)
+		goto get_features;
+
+	/* not in application mode: ask the bootloader to start the app fw */
+	retval = tcm_hcd->write_message(tcm_hcd,
+			CMD_RUN_APPLICATION_FIRMWARE,
+			NULL,
+			0,
+			&resp_buf,
+			&resp_buf_size,
+			&resp_length,
+			NULL,
+			MODE_SWITCH_DELAY_MS);
+	if (retval < 0) {
+		LOGN(tcm_hcd->pdev->dev.parent,
+				"Failed to write command %s\n",
+				STR(CMD_RUN_APPLICATION_FIRMWARE));
+	}
+
+	retval = tcm_hcd->identify(tcm_hcd, false);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to do identification\n");
+		goto exit;
+	}
+
+get_features:
+	LOGD(tcm_hcd->pdev->dev.parent,
+			"Firmware mode = 0x%02x\n",
+			tcm_hcd->id_info.mode);
+
+	if (tcm_hcd->id_info.mode != MODE_APPLICATION) {
+		LOGN(tcm_hcd->pdev->dev.parent,
+				"Boot status = 0x%02x\n",
+				tcm_hcd->boot_info.status);
+	} else if (tcm_hcd->app_status != APP_STATUS_OK) {
+		LOGN(tcm_hcd->pdev->dev.parent,
+				"Application status = 0x%02x\n",
+				tcm_hcd->app_status);
+	}
+
+	/* feature query only makes sense with the application fw running */
+	if (tcm_hcd->id_info.mode != MODE_APPLICATION)
+		goto dispatch_reset;
+
+	retval = tcm_hcd->write_message(tcm_hcd,
+			CMD_GET_FEATURES,
+			NULL,
+			0,
+			&resp_buf,
+			&resp_buf_size,
+			&resp_length,
+			NULL,
+			0);
+	if (retval < 0)
+		LOGN(tcm_hcd->pdev->dev.parent,
+				"Failed to write command %s\n",
+				STR(CMD_GET_FEATURES));
+	else {
+		retval = secure_memcpy((unsigned char *)&tcm_hcd->features,
+				sizeof(tcm_hcd->features),
+				resp_buf,
+				resp_buf_size,
+				MIN(sizeof(tcm_hcd->features), resp_length));
+		if (retval < 0) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to copy feature description\n");
+		}
+	}
+
+dispatch_reset:
+	mutex_lock(&mod_pool.mutex);
+	/*
+	 * reconstructing is raised while the reset callbacks run;
+	 * presumably it tells the module workqueue to hold off - confirm.
+	 */
+	mod_pool.reconstructing = true;
+
+	if (!list_empty(&mod_pool.list)) {
+		list_for_each_entry(mod_handler, &mod_pool.list, link) {
+			if (!mod_handler->insert &&
+					!mod_handler->detach &&
+					(mod_handler->mod_cb->reset))
+				mod_handler->mod_cb->reset(tcm_hcd);
+		}
+	}
+
+	mod_pool.reconstructing = false;
+	mutex_unlock(&mod_pool.mutex);
+
+	retval = 0;
+
+exit:
+	if (update_wd)
+		tcm_hcd->update_watchdog(tcm_hcd, true);
+
+	mutex_unlock(&tcm_hcd->reset_mutex);
+
+	kfree(resp_buf);
+
+	return retval;
+}
+
+/*
+ * syna_tcm_rezero() - send CMD_REZERO to rebaseline the touch sensor
+ * @tcm_hcd: handle to the core device instance
+ *
+ * The response payload is not used; the buffer is only consumed and freed.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+static int syna_tcm_rezero(struct syna_tcm_hcd *tcm_hcd)
+{
+	int retval;
+	unsigned char *resp_buf;
+	unsigned int resp_buf_size;
+	unsigned int resp_length;
+
+	resp_buf = NULL;
+	resp_buf_size = 0;
+
+	retval = tcm_hcd->write_message(tcm_hcd,
+			CMD_REZERO,
+			NULL,
+			0,
+			&resp_buf,
+			&resp_buf_size,
+			&resp_length,
+			NULL,
+			0);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to write command %s\n",
+				STR(CMD_REZERO));
+		goto exit;
+	}
+
+	retval = 0;
+
+exit:
+	kfree(resp_buf);
+
+	return retval;
+}
+
+/*
+ * syna_tcm_helper_work() - one-shot worker servicing deferred helper tasks
+ * @work: embedded work item inside struct syna_tcm_helper
+ *
+ * The task to run is published through helper->task (atomic) and cleared to
+ * HELP_NONE when done.  Two tasks are handled:
+ *  - HELP_RUN_APPLICATION_FIRMWARE: switch the chip back to application
+ *    mode with the watchdog paused, under reset_mutex.
+ *  - HELP_SEND_RESET_NOTIFICATION: re-identify the chip, invoke every
+ *    resident module's reset callback, then wake waiters on hdl_wq.
+ */
+static void syna_tcm_helper_work(struct work_struct *work)
+{
+	int retval;
+	unsigned char task;
+	struct syna_tcm_module_handler *mod_handler;
+	struct syna_tcm_helper *helper =
+			container_of(work, struct syna_tcm_helper, work);
+	struct syna_tcm_hcd *tcm_hcd =
+			container_of(helper, struct syna_tcm_hcd, helper);
+
+	task = atomic_read(&helper->task);
+
+	switch (task) {
+	case HELP_RUN_APPLICATION_FIRMWARE:
+		mutex_lock(&tcm_hcd->reset_mutex);
+		tcm_hcd->update_watchdog(tcm_hcd, false);
+		retval = syna_tcm_run_application_firmware(tcm_hcd);
+		if (retval < 0)
+			LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to switch to application mode\n");
+		tcm_hcd->update_watchdog(tcm_hcd, true);
+		mutex_unlock(&tcm_hcd->reset_mutex);
+		break;
+	case HELP_SEND_RESET_NOTIFICATION:
+		mutex_lock(&tcm_hcd->reset_mutex);
+		retval = tcm_hcd->identify(tcm_hcd, true);
+		if (retval < 0) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+					"Failed to do identification\n");
+			mutex_unlock(&tcm_hcd->reset_mutex);
+			break;
+		}
+		mutex_lock(&mod_pool.mutex);
+		if (!list_empty(&mod_pool.list)) {
+			list_for_each_entry(mod_handler, &mod_pool.list, link) {
+				if (!mod_handler->insert &&
+						!mod_handler->detach &&
+						(mod_handler->mod_cb->reset))
+					mod_handler->mod_cb->reset(tcm_hcd);
+			}
+		}
+		mutex_unlock(&mod_pool.mutex);
+		mutex_unlock(&tcm_hcd->reset_mutex);
+		wake_up_interruptible(&tcm_hcd->hdl_wq);
+		break;
+	default:
+		break;
+	}
+
+	atomic_set(&helper->task, HELP_NONE);
+}
+
+#if defined(CONFIG_PM) || defined(CONFIG_DRM) || defined(CONFIG_FB)
+
+static int syna_tcm_deferred_probe(struct device *dev);
+
+/*
+ * syna_tcm_resume() - resume handler (PM / panel-unblank path)
+ * @dev: platform device
+ *
+ * If hardware bring-up never completed (init_okay false), run the deferred
+ * probe first.  Otherwise: restore the active pinctrl state, re-enable the
+ * interrupt (or wait for host download), exit deep sleep, rezero, and
+ * finally run every resident module's resume callback.  Falls back to a
+ * full reset when the application firmware is not running (or always, when
+ * RESET_ON_RESUME is defined).
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+static int syna_tcm_resume(struct device *dev)
+{
+	int retval;
+	struct syna_tcm_module_handler *mod_handler;
+	struct syna_tcm_hcd *tcm_hcd = dev_get_drvdata(dev);
+
+	if (!tcm_hcd->init_okay)
+		syna_tcm_deferred_probe(dev);
+	else {
+		/* quiesce watchdog and IRQ before re-initializing below */
+		if (tcm_hcd->irq_enabled) {
+			tcm_hcd->watchdog.run = false;
+			tcm_hcd->update_watchdog(tcm_hcd, false);
+			tcm_hcd->enable_irq(tcm_hcd, false, false);
+		}
+	}
+
+	if (!tcm_hcd->in_suspend)
+		return 0;
+
+	retval = pinctrl_select_state(
+			tcm_hcd->ts_pinctrl,
+			tcm_hcd->pinctrl_state_active);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+			"%s: Failed to select %s pinstate %d\n",
+			__func__, PINCTRL_STATE_ACTIVE, retval);
+	}
+
+	if (tcm_hcd->host_download_mode) {
+#ifndef WAKEUP_GESTURE
+		syna_tcm_check_hdl(tcm_hcd);
+		retval = syna_tcm_wait_hdl(tcm_hcd);
+		if (retval < 0) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to wait for completion of download\n");
+			goto exit;
+		}
+#endif
+	} else {
+		/*
+		 * NOTE(review): third argument is NULL where other call
+		 * sites pass a bool (evaluates to false) - confirm intended.
+		 */
+		tcm_hcd->enable_irq(tcm_hcd, true, NULL);
+#ifdef RESET_ON_RESUME
+		msleep(RESET_ON_RESUME_DELAY_MS);
+		goto do_reset;
+#endif
+	}
+
+	tcm_hcd->update_watchdog(tcm_hcd, true);
+
+	if (tcm_hcd->id_info.mode != MODE_APPLICATION ||
+			tcm_hcd->app_status != APP_STATUS_OK) {
+		LOGN(tcm_hcd->pdev->dev.parent,
+				"Application firmware not running\n");
+		goto do_reset;
+	}
+
+	retval = tcm_hcd->sleep(tcm_hcd, false);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to exit deep sleep\n");
+		goto exit;
+	}
+
+	retval = syna_tcm_rezero(tcm_hcd);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to rezero\n");
+		goto exit;
+	}
+
+	goto mod_resume;
+
+do_reset:
+	retval = tcm_hcd->reset(tcm_hcd, false, true);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to do reset\n");
+		goto exit;
+	}
+
+	if (tcm_hcd->id_info.mode != MODE_APPLICATION ||
+			tcm_hcd->app_status != APP_STATUS_OK) {
+		LOGN(tcm_hcd->pdev->dev.parent,
+				"Application firmware not running\n");
+		retval = 0;
+		goto exit;
+	}
+
+mod_resume:
+	mutex_lock(&mod_pool.mutex);
+
+	if (!list_empty(&mod_pool.list)) {
+		list_for_each_entry(mod_handler, &mod_pool.list, link) {
+			if (!mod_handler->insert &&
+					!mod_handler->detach &&
+					(mod_handler->mod_cb->resume))
+				mod_handler->mod_cb->resume(tcm_hcd);
+		}
+	}
+
+	mutex_unlock(&mod_pool.mutex);
+
+	retval = 0;
+
+exit:
+	/* cleared even on failure so a later resume attempt can proceed */
+	tcm_hcd->in_suspend = false;
+
+	return retval;
+}
+
+/*
+ * syna_tcm_suspend() - suspend handler (PM / panel-blank path)
+ * @dev: platform device
+ *
+ * Selects the suspend pinctrl state and runs every resident module's
+ * suspend callback.  No-op if already suspended or never initialized.
+ *
+ * Return: always 0.
+ */
+static int syna_tcm_suspend(struct device *dev)
+{
+	struct syna_tcm_module_handler *mod_handler;
+	struct syna_tcm_hcd *tcm_hcd = dev_get_drvdata(dev);
+
+	if (tcm_hcd->in_suspend || !tcm_hcd->init_okay)
+		return 0;
+
+	/*
+	 * NOTE(review): the suspend state is selected but the error message
+	 * names PINCTRL_STATE_RELEASE - likely should be
+	 * PINCTRL_STATE_SUSPEND; confirm.
+	 */
+	if (pinctrl_select_state(
+			tcm_hcd->ts_pinctrl,
+			tcm_hcd->pinctrl_state_suspend))
+		LOGE(tcm_hcd->pdev->dev.parent,
+			"%s: Failed to select %s pinstate\n",
+			__func__, PINCTRL_STATE_RELEASE);
+
+	mutex_lock(&mod_pool.mutex);
+
+	if (!list_empty(&mod_pool.list)) {
+		list_for_each_entry(mod_handler, &mod_pool.list, link) {
+			if (!mod_handler->insert &&
+					!mod_handler->detach &&
+					(mod_handler->mod_cb->suspend))
+				mod_handler->mod_cb->suspend(tcm_hcd);
+		}
+	}
+
+	mutex_unlock(&mod_pool.mutex);
+
+	tcm_hcd->in_suspend = true;
+
+	return 0;
+}
+#endif
+
+
+
+#ifdef CONFIG_DRM
+
+/*
+ * syna_tcm_early_suspend() - DRM early-blank handler
+ * @dev: platform device
+ *
+ * Pauses the watchdog, enters deep sleep (unless WAKEUP_GESTURE keeps the
+ * sensor awake for gestures), runs module early_suspend callbacks, and
+ * finally disables the interrupt.
+ *
+ * Return: 0, or negative errno if entering deep sleep fails.
+ */
+static int syna_tcm_early_suspend(struct device *dev)
+{
+#ifndef WAKEUP_GESTURE
+	int retval;
+#endif
+
+	struct syna_tcm_module_handler *mod_handler;
+	struct syna_tcm_hcd *tcm_hcd = dev_get_drvdata(dev);
+
+	if (tcm_hcd->in_suspend || !tcm_hcd->init_okay)
+		return 0;
+
+	/*
+	 * NOTE(review): message names PINCTRL_STATE_RELEASE although the
+	 * suspend state is being selected - confirm.
+	 */
+	if (pinctrl_select_state(
+			tcm_hcd->ts_pinctrl,
+			tcm_hcd->pinctrl_state_suspend))
+		LOGE(tcm_hcd->pdev->dev.parent,
+			"%s: Failed to select %s pinstate\n",
+			__func__, PINCTRL_STATE_RELEASE);
+
+	tcm_hcd->update_watchdog(tcm_hcd, false);
+
+	if (tcm_hcd->id_info.mode != MODE_APPLICATION ||
+			tcm_hcd->app_status != APP_STATUS_OK) {
+		LOGN(tcm_hcd->pdev->dev.parent,
+				"Application firmware not running\n");
+		return 0;
+	}
+
+#ifndef WAKEUP_GESTURE
+	retval = tcm_hcd->sleep(tcm_hcd, true);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to enter deep sleep\n");
+		return retval;
+	}
+#endif
+
+	mutex_lock(&mod_pool.mutex);
+
+	if (!list_empty(&mod_pool.list)) {
+		list_for_each_entry(mod_handler, &mod_pool.list, link) {
+			if (!mod_handler->insert &&
+					!mod_handler->detach &&
+					(mod_handler->mod_cb->early_suspend))
+				mod_handler->mod_cb->early_suspend(tcm_hcd);
+		}
+	}
+
+	mutex_unlock(&mod_pool.mutex);
+
+#ifndef WAKEUP_GESTURE
+	tcm_hcd->enable_irq(tcm_hcd, false, true);
+#endif
+
+	return 0;
+}
+
+/*
+ * syna_tcm_fb_notifier_cb() - DRM panel blank/unblank notifier
+ * @nb: embedded notifier block inside struct syna_tcm_hcd
+ * @action: DRM_PANEL_EARLY_EVENT_BLANK or DRM_PANEL_EVENT_BLANK
+ * @data: struct drm_panel_notifier carrying the blank transition
+ *
+ * Maps panel transitions to early-suspend/suspend/resume.  If a firmware
+ * flash is in progress when the panel powers down, waits (bounded) for it
+ * to finish first.  Always returns 0 to the notifier chain (errors from
+ * the suspend/resume paths are collected in retval but not propagated),
+ * except -EIO on flash-wait timeout.
+ */
+static int syna_tcm_fb_notifier_cb(struct notifier_block *nb,
+		unsigned long action, void *data)
+{
+	int retval = 0;
+	int transition;
+	struct drm_panel_notifier *evdata = data;
+	struct syna_tcm_hcd *tcm_hcd =
+			container_of(nb, struct syna_tcm_hcd, fb_notifier);
+
+	if (!evdata)
+		return 0;
+
+	/*
+	 * NOTE(review): evdata->data is dereferenced without a NULL check,
+	 * unlike the CONFIG_FB variant below - confirm it can never be NULL.
+	 */
+	transition = *(int *)evdata->data;
+
+	if (atomic_read(&tcm_hcd->firmware_flashing)
+		&& transition == DRM_PANEL_BLANK_POWERDOWN) {
+		retval = wait_event_interruptible_timeout(tcm_hcd->reflash_wq,
+				!atomic_read(&tcm_hcd->firmware_flashing),
+				msecs_to_jiffies(RESPONSE_TIMEOUT_MS));
+		if (retval == 0) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+				"Timed out waiting for flashing firmware\n");
+			atomic_set(&tcm_hcd->firmware_flashing, 0);
+			return -EIO;
+		}
+	}
+
+	if (action == DRM_PANEL_EARLY_EVENT_BLANK &&
+			transition == DRM_PANEL_BLANK_POWERDOWN)
+		retval = syna_tcm_early_suspend(&tcm_hcd->pdev->dev);
+	else if (action == DRM_PANEL_EVENT_BLANK) {
+		if (transition == DRM_PANEL_BLANK_POWERDOWN) {
+			retval = syna_tcm_suspend(&tcm_hcd->pdev->dev);
+			tcm_hcd->fb_ready = 0;
+		} else if (transition == DRM_PANEL_BLANK_UNBLANK) {
+#ifndef RESUME_EARLY_UNBLANK
+			retval = syna_tcm_resume(&tcm_hcd->pdev->dev);
+			tcm_hcd->fb_ready++;
+#endif
+		}
+	} else if (action == DRM_PANEL_EARLY_EVENT_BLANK &&
+			transition == DRM_PANEL_BLANK_UNBLANK) {
+#ifdef RESUME_EARLY_UNBLANK
+		retval = syna_tcm_resume(&tcm_hcd->pdev->dev);
+		tcm_hcd->fb_ready++;
+#endif
+	}
+
+
+	return 0;
+}
+
+/*
+ * NOTE(review): conventional form is "#elif defined(CONFIG_FB)"; this
+ * relies on CONFIG_FB expanding to a nonzero value - confirm.
+ */
+#elif CONFIG_FB
+
+/*
+ * syna_tcm_early_suspend() - FB early-blank handler (CONFIG_FB build)
+ *  @dev: platform device
+ *
+ * Same flow as the DRM variant but without pinctrl handling or the final
+ * IRQ disable: pause the watchdog, enter deep sleep (unless
+ * WAKEUP_GESTURE), then run module early_suspend callbacks.
+ *
+ * Return: 0, or negative errno if entering deep sleep fails.
+ */
+static int syna_tcm_early_suspend(struct device *dev)
+{
+#ifndef WAKEUP_GESTURE
+	int retval;
+#endif
+
+	struct syna_tcm_module_handler *mod_handler;
+	struct syna_tcm_hcd *tcm_hcd = dev_get_drvdata(dev);
+
+	if (tcm_hcd->in_suspend)
+		return 0;
+
+	tcm_hcd->update_watchdog(tcm_hcd, false);
+
+	if (tcm_hcd->id_info.mode != MODE_APPLICATION ||
+			tcm_hcd->app_status != APP_STATUS_OK) {
+		LOGN(tcm_hcd->pdev->dev.parent,
+				"Application firmware not running\n");
+		return 0;
+	}
+
+#ifndef WAKEUP_GESTURE
+	retval = tcm_hcd->sleep(tcm_hcd, true);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to enter deep sleep\n");
+		return retval;
+	}
+#endif
+
+	mutex_lock(&mod_pool.mutex);
+
+	if (!list_empty(&mod_pool.list)) {
+		list_for_each_entry(mod_handler, &mod_pool.list, link) {
+			if (!mod_handler->insert &&
+					!mod_handler->detach &&
+					(mod_handler->mod_cb->early_suspend))
+				mod_handler->mod_cb->early_suspend(tcm_hcd);
+		}
+	}
+
+	mutex_unlock(&mod_pool.mutex);
+
+	return 0;
+}
+
+/*
+ * syna_tcm_fb_notifier_cb() - framebuffer blank/unblank notifier
+ * (CONFIG_FB build)
+ * @nb: embedded notifier block inside struct syna_tcm_hcd
+ * @action: FB_EARLY_EVENT_BLANK or FB_EVENT_BLANK
+ * @data: struct fb_event carrying the blank transition
+ *
+ * Mirrors the DRM variant: maps FB blank transitions to
+ * early-suspend/suspend/resume, optionally waiting for an in-flight
+ * firmware flash before powering down.  Always returns 0 to the chain
+ * except -EIO on flash-wait timeout.
+ */
+static int syna_tcm_fb_notifier_cb(struct notifier_block *nb,
+		unsigned long action, void *data)
+{
+	int retval = 0;
+	int *transition;
+	struct fb_event *evdata = data;
+	struct syna_tcm_hcd *tcm_hcd =
+			container_of(nb, struct syna_tcm_hcd, fb_notifier);
+
+	if (!evdata || !evdata->data || !tcm_hcd)
+		return 0;
+
+	transition = (int *)evdata->data;
+
+	if (atomic_read(&tcm_hcd->firmware_flashing)
+		&& *transition == FB_BLANK_POWERDOWN) {
+		retval = wait_event_interruptible_timeout(tcm_hcd->reflash_wq,
+				!atomic_read(&tcm_hcd->firmware_flashing),
+				msecs_to_jiffies(RESPONSE_TIMEOUT_MS));
+		if (retval == 0) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+				"Timed out waiting for flashing firmware\n");
+			atomic_set(&tcm_hcd->firmware_flashing, 0);
+			return -EIO;
+		}
+	}
+
+	if (action == FB_EARLY_EVENT_BLANK &&
+			*transition == FB_BLANK_POWERDOWN)
+		retval = syna_tcm_early_suspend(&tcm_hcd->pdev->dev);
+	else if (action == FB_EVENT_BLANK) {
+		if (*transition == FB_BLANK_POWERDOWN) {
+			retval = syna_tcm_suspend(&tcm_hcd->pdev->dev);
+			tcm_hcd->fb_ready = 0;
+		} else if (*transition == FB_BLANK_UNBLANK) {
+#ifndef RESUME_EARLY_UNBLANK
+			retval = syna_tcm_resume(&tcm_hcd->pdev->dev);
+			tcm_hcd->fb_ready++;
+#endif
+		}
+	} else if (action == FB_EARLY_EVENT_BLANK &&
+			*transition == FB_BLANK_UNBLANK) {
+#ifdef RESUME_EARLY_UNBLANK
+		retval = syna_tcm_resume(&tcm_hcd->pdev->dev);
+		tcm_hcd->fb_ready++;
+#endif
+	}
+
+	return 0;
+}
+#endif
+
+/*
+ * synaptics_tcm_pinctrl_init() - acquire pinctrl handle and look up states
+ * @tcm_hcd: handle to the core device instance
+ *
+ * Looks up the "pmx_ts_active", "pmx_ts_suspend" and "pmx_ts_release"
+ * states.  On failure of the pinctrl get or the active/suspend lookups the
+ * handle is released and ts_pinctrl reset to NULL.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+static int synaptics_tcm_pinctrl_init(struct syna_tcm_hcd *tcm_hcd)
+{
+	int retval = 0;
+
+	/* Get pinctrl if target uses pinctrl */
+	tcm_hcd->ts_pinctrl = devm_pinctrl_get((tcm_hcd->pdev->dev.parent));
+	if (IS_ERR_OR_NULL(tcm_hcd->ts_pinctrl)) {
+		retval = PTR_ERR(tcm_hcd->ts_pinctrl);
+		LOGE(tcm_hcd->pdev->dev.parent,
+			"Target does not use pinctrl %d\n", retval);
+		goto err_pinctrl_get;
+	}
+
+	tcm_hcd->pinctrl_state_active
+		= pinctrl_lookup_state(tcm_hcd->ts_pinctrl, "pmx_ts_active");
+	if (IS_ERR_OR_NULL(tcm_hcd->pinctrl_state_active)) {
+		retval = PTR_ERR(tcm_hcd->pinctrl_state_active);
+		LOGE(tcm_hcd->pdev->dev.parent,
+			"Can not lookup %s pinstate %d\n",
+			PINCTRL_STATE_ACTIVE, retval);
+		goto err_pinctrl_lookup;
+	}
+
+	tcm_hcd->pinctrl_state_suspend
+		= pinctrl_lookup_state(tcm_hcd->ts_pinctrl, "pmx_ts_suspend");
+	if (IS_ERR_OR_NULL(tcm_hcd->pinctrl_state_suspend)) {
+		retval = PTR_ERR(tcm_hcd->pinctrl_state_suspend);
+		LOGE(tcm_hcd->pdev->dev.parent,
+			"Can not lookup %s pinstate %d\n",
+			PINCTRL_STATE_SUSPEND, retval);
+		goto err_pinctrl_lookup;
+	}
+
+	/*
+	 * NOTE(review): if the release-state lookup fails, the error code is
+	 * returned without releasing the pinctrl handle (no goto) - confirm
+	 * whether this failure is meant to be fatal or tolerated.
+	 */
+	tcm_hcd->pinctrl_state_release
+		= pinctrl_lookup_state(tcm_hcd->ts_pinctrl, "pmx_ts_release");
+	if (IS_ERR_OR_NULL(tcm_hcd->pinctrl_state_release)) {
+		retval = PTR_ERR(tcm_hcd->pinctrl_state_release);
+		LOGE(tcm_hcd->pdev->dev.parent,
+			"Can not lookup %s pinstate %d\n",
+			PINCTRL_STATE_RELEASE, retval);
+	}
+
+	return retval;
+
+err_pinctrl_lookup:
+	devm_pinctrl_put(tcm_hcd->ts_pinctrl);
+err_pinctrl_get:
+	tcm_hcd->ts_pinctrl = NULL;
+	return retval;
+}
+
+/*
+ * syna_tcm_probe() - platform probe: allocate and wire up the core instance
+ * @pdev: platform device carrying the hw interface in platform_data
+ *
+ * Sets up the syna_tcm_hcd structure, function pointers, mutexes, buffers,
+ * wait queues, regulators, pinctrl, sysfs nodes, the panel/FB notifier,
+ * the report-notifier kthread and the work queues.  Actual hardware
+ * bring-up (GPIOs, IRQ, first reset) is deferred to
+ * syna_tcm_deferred_probe(), invoked from the first resume.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+static int syna_tcm_probe(struct platform_device *pdev)
+{
+	int retval;
+	int idx;
+	struct syna_tcm_hcd *tcm_hcd;
+	const struct syna_tcm_board_data *bdata;
+	const struct syna_tcm_hw_interface *hw_if;
+	struct drm_panel *active_panel = tcm_get_panel();
+
+	hw_if = pdev->dev.platform_data;
+	if (!hw_if) {
+		LOGE(&pdev->dev,
+				"Hardware interface not found\n");
+		return -ENODEV;
+	}
+
+	bdata = hw_if->bdata;
+	if (!bdata) {
+		LOGE(&pdev->dev,
+				"Board data not found\n");
+		return -ENODEV;
+	}
+
+	tcm_hcd = kzalloc(sizeof(*tcm_hcd), GFP_KERNEL);
+	if (!tcm_hcd) {
+		LOGE(&pdev->dev,
+				"Failed to allocate memory for tcm_hcd\n");
+		return -ENOMEM;
+	}
+
+	platform_set_drvdata(pdev, tcm_hcd);
+
+	/* publish the core entry points used by sub-modules */
+	tcm_hcd->pdev = pdev;
+	tcm_hcd->hw_if = hw_if;
+	tcm_hcd->reset = syna_tcm_reset;
+	tcm_hcd->sleep = syna_tcm_sleep;
+	tcm_hcd->identify = syna_tcm_identify;
+	tcm_hcd->enable_irq = syna_tcm_enable_irq;
+	tcm_hcd->switch_mode = syna_tcm_switch_mode;
+	tcm_hcd->read_message = syna_tcm_read_message;
+	tcm_hcd->write_message = syna_tcm_write_message;
+	tcm_hcd->get_dynamic_config = syna_tcm_get_dynamic_config;
+	tcm_hcd->set_dynamic_config = syna_tcm_set_dynamic_config;
+	tcm_hcd->get_data_location = syna_tcm_get_data_location;
+
+	tcm_hcd->rd_chunk_size = RD_CHUNK_SIZE;
+	tcm_hcd->wr_chunk_size = WR_CHUNK_SIZE;
+
+#ifdef PREDICTIVE_READING
+	tcm_hcd->read_length = MIN_READ_LENGTH;
+#else
+	tcm_hcd->read_length = MESSAGE_HEADER_SIZE;
+#endif
+
+	tcm_hcd->watchdog.run = RUN_WATCHDOG;
+	tcm_hcd->update_watchdog = syna_tcm_update_watchdog;
+
+	/* a negative irq_gpio propagates as an invalid irq number */
+	if (bdata->irq_gpio >= 0)
+		tcm_hcd->irq = gpio_to_irq(bdata->irq_gpio);
+	else
+		tcm_hcd->irq = bdata->irq_gpio;
+
+	mutex_init(&tcm_hcd->extif_mutex);
+	mutex_init(&tcm_hcd->reset_mutex);
+	mutex_init(&tcm_hcd->irq_en_mutex);
+	mutex_init(&tcm_hcd->io_ctrl_mutex);
+	mutex_init(&tcm_hcd->rw_ctrl_mutex);
+	mutex_init(&tcm_hcd->command_mutex);
+	mutex_init(&tcm_hcd->identify_mutex);
+
+	INIT_BUFFER(tcm_hcd->in, false);
+	INIT_BUFFER(tcm_hcd->out, false);
+	INIT_BUFFER(tcm_hcd->resp, true);
+	INIT_BUFFER(tcm_hcd->temp, false);
+	INIT_BUFFER(tcm_hcd->config, false);
+	INIT_BUFFER(tcm_hcd->report.buffer, true);
+
+	LOCK_BUFFER(tcm_hcd->in);
+
+	retval = syna_tcm_alloc_mem(tcm_hcd,
+			&tcm_hcd->in,
+			tcm_hcd->read_length + 1);
+	if (retval < 0) {
+		LOGE(&pdev->dev,
+			"Failed to allocate memory for tcm_hcd->in.buf\n");
+		UNLOCK_BUFFER(tcm_hcd->in);
+		goto err_alloc_mem;
+	}
+
+	UNLOCK_BUFFER(tcm_hcd->in);
+
+	atomic_set(&tcm_hcd->command_status, CMD_IDLE);
+
+	atomic_set(&tcm_hcd->helper.task, HELP_NONE);
+
+	device_init_wakeup(&pdev->dev, 1);
+
+	init_waitqueue_head(&tcm_hcd->hdl_wq);
+
+	init_waitqueue_head(&tcm_hcd->reflash_wq);
+	atomic_set(&tcm_hcd->firmware_flashing, 0);
+
+	/* the module pool is shared; initialize it only once */
+	if (!mod_pool.initialized) {
+		mutex_init(&mod_pool.mutex);
+		INIT_LIST_HEAD(&mod_pool.list);
+		mod_pool.initialized = true;
+	}
+
+	retval = syna_tcm_get_regulator(tcm_hcd, true);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to get regulators\n");
+		goto err_get_regulator;
+	}
+
+	retval = synaptics_tcm_pinctrl_init(tcm_hcd);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent, "Failed to init pinctrl\n");
+		goto err_pinctrl_init;
+	}
+
+	sysfs_dir = kobject_create_and_add(PLATFORM_DRIVER_NAME,
+			&pdev->dev.kobj);
+	if (!sysfs_dir) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to create sysfs directory\n");
+		retval = -EINVAL;
+		goto err_sysfs_create_dir;
+	}
+
+	tcm_hcd->sysfs_dir = sysfs_dir;
+
+	for (idx = 0; idx < ARRAY_SIZE(attrs); idx++) {
+		retval = sysfs_create_file(tcm_hcd->sysfs_dir,
+				&(*attrs[idx]).attr);
+		if (retval < 0) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+					"Failed to create sysfs file\n");
+			goto err_sysfs_create_file;
+		}
+	}
+
+	tcm_hcd->dynamnic_config_sysfs_dir =
+			kobject_create_and_add(DYNAMIC_CONFIG_SYSFS_DIR_NAME,
+			tcm_hcd->sysfs_dir);
+	if (!tcm_hcd->dynamnic_config_sysfs_dir) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+			"Failed to create dynamic config sysfs directory\n");
+		retval = -EINVAL;
+		goto err_sysfs_create_dynamic_config_dir;
+	}
+
+	for (idx = 0; idx < ARRAY_SIZE(dynamic_config_attrs); idx++) {
+		retval = sysfs_create_file(tcm_hcd->dynamnic_config_sysfs_dir,
+				&(*dynamic_config_attrs[idx]).attr);
+		if (retval < 0) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to create dynamic config sysfs file\n");
+			goto err_sysfs_create_dynamic_config_file;
+		}
+	}
+
+#ifdef CONFIG_DRM
+	tcm_hcd->fb_notifier.notifier_call = syna_tcm_fb_notifier_cb;
+	if (active_panel) {
+		retval = drm_panel_notifier_register(active_panel,
+				&tcm_hcd->fb_notifier);
+		if (retval < 0) {
+			dev_err(&pdev->dev,
+					"%s: Failed to register fb notifier client\n",
+					__func__);
+			/*
+			 * NOTE(review): err_drm_reg only frees tcm_hcd; this
+			 * jump skips the sysfs/kobject and buffer teardown
+			 * done by the earlier labels - confirm (looks like a
+			 * resource leak on this path).
+			 */
+			goto err_drm_reg;
+		}
+	}
+
+#elif CONFIG_FB
+	tcm_hcd->fb_notifier.notifier_call = syna_tcm_fb_notifier_cb;
+	retval = fb_register_client(&tcm_hcd->fb_notifier);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to register FB notifier client\n");
+	}
+#endif
+
+	tcm_hcd->notifier_thread = kthread_run(syna_tcm_report_notifier,
+			tcm_hcd, "syna_tcm_report_notifier");
+	if (IS_ERR(tcm_hcd->notifier_thread)) {
+		retval = PTR_ERR(tcm_hcd->notifier_thread);
+		LOGE(tcm_hcd->pdev->dev.parent,
+			"Failed to create and run tcm_hcd->notifier_thread\n");
+		goto err_create_run_kthread;
+	}
+
+	tcm_hcd->helper.workqueue =
+			create_singlethread_workqueue("syna_tcm_helper");
+	INIT_WORK(&tcm_hcd->helper.work, syna_tcm_helper_work);
+
+	tcm_hcd->watchdog.workqueue =
+			create_singlethread_workqueue("syna_tcm_watchdog");
+	INIT_DELAYED_WORK(&tcm_hcd->watchdog.work, syna_tcm_watchdog_work);
+
+	tcm_hcd->polling_workqueue =
+			create_singlethread_workqueue("syna_tcm_polling");
+	INIT_DELAYED_WORK(&tcm_hcd->polling_work, syna_tcm_polling_work);
+
+	mod_pool.workqueue =
+			create_singlethread_workqueue("syna_tcm_module");
+	INIT_WORK(&mod_pool.work, syna_tcm_module_work);
+	mod_pool.tcm_hcd = tcm_hcd;
+	mod_pool.queue_work = true;
+	mod_pool.reconstructing = false;
+
+	return 0;
+
+
+err_create_run_kthread:
+#ifdef CONFIG_DRM
+	if (active_panel)
+		drm_panel_notifier_unregister(active_panel,
+				&tcm_hcd->fb_notifier);
+#elif CONFIG_FB
+	fb_unregister_client(&tcm_hcd->fb_notifier);
+#endif
+
+err_sysfs_create_dynamic_config_file:
+	/* idx is the first attr that failed (or the array size on the
+	 * fall-through path); remove everything created before it
+	 */
+	for (idx--; idx >= 0; idx--) {
+		sysfs_remove_file(tcm_hcd->dynamnic_config_sysfs_dir,
+				&(*dynamic_config_attrs[idx]).attr);
+	}
+
+	kobject_put(tcm_hcd->dynamnic_config_sysfs_dir);
+
+	idx = ARRAY_SIZE(attrs);
+
+err_sysfs_create_dynamic_config_dir:
+err_sysfs_create_file:
+	for (idx--; idx >= 0; idx--)
+		sysfs_remove_file(tcm_hcd->sysfs_dir, &(*attrs[idx]).attr);
+
+	kobject_put(tcm_hcd->sysfs_dir);
+
+err_sysfs_create_dir:
+err_pinctrl_init:
+err_get_regulator:
+	device_init_wakeup(&pdev->dev, 0);
+
+err_alloc_mem:
+	RELEASE_BUFFER(tcm_hcd->report.buffer);
+	RELEASE_BUFFER(tcm_hcd->config);
+	RELEASE_BUFFER(tcm_hcd->temp);
+	RELEASE_BUFFER(tcm_hcd->resp);
+	RELEASE_BUFFER(tcm_hcd->out);
+	RELEASE_BUFFER(tcm_hcd->in);
+
+err_drm_reg:
+	kfree(tcm_hcd);
+
+	return retval;
+}
+
+/*
+ * syna_tcm_deferred_probe() - hardware bring-up deferred from probe
+ * @dev: platform device
+ *
+ * Called from the first resume when init_okay is still false.  Selects the
+ * active pinctrl state, enables regulators, configures GPIOs, enables the
+ * interrupt and performs the initial software reset.  On reset failure the
+ * driver is either kept alive in a degraded state (KEEP_DRIVER_ON_ERROR)
+ * or fully torn down.  On success the module work is queued.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+static int syna_tcm_deferred_probe(struct device *dev)
+{
+	int retval;
+	const struct syna_tcm_board_data *bdata;
+	struct syna_tcm_hcd *tcm_hcd = dev_get_drvdata(dev);
+
+	retval = pinctrl_select_state(
+			tcm_hcd->ts_pinctrl,
+			tcm_hcd->pinctrl_state_active);
+
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to pinctrl_select_state\n");
+		goto err_pinctrl_select_state;
+	}
+
+	retval = syna_tcm_enable_regulator(tcm_hcd, true);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to enable regulators\n");
+		goto err_enable_regulator;
+	}
+
+	retval = syna_tcm_config_gpio(tcm_hcd);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to configure GPIO's\n");
+		goto err_config_gpio;
+	}
+
+	retval = tcm_hcd->enable_irq(tcm_hcd, true, NULL);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to enable interrupt\n");
+		goto err_enable_irq;
+	}
+	retval = tcm_hcd->reset(tcm_hcd, false, false);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to do reset\n");
+		/* degrade gracefully: stop watchdog and IRQ, keep driver */
+		tcm_hcd->init_okay = false;
+		tcm_hcd->watchdog.run = false;
+		tcm_hcd->update_watchdog(tcm_hcd, false);
+		tcm_hcd->enable_irq(tcm_hcd, false, false);
+#ifndef KEEP_DRIVER_ON_ERROR
+		goto err_reset;
+#endif
+	} else {
+		tcm_hcd->init_okay = true;
+		tcm_hcd->update_watchdog(tcm_hcd, true);
+	}
+
+	/* kick the module pool so resident modules finish their init */
+	queue_work(mod_pool.workqueue, &mod_pool.work);
+
+	return 0;
+#ifndef KEEP_DRIVER_ON_ERROR
+err_reset:
+#endif
+err_enable_irq:
+
+err_config_gpio:
+	syna_tcm_enable_regulator(tcm_hcd, false);
+
+err_enable_regulator:
+	syna_tcm_get_regulator(tcm_hcd, false);
+
+err_pinctrl_select_state:
+	if (!tcm_hcd->hw_if || !tcm_hcd->hw_if->bdata)
+		return -EINVAL;
+
+	bdata = tcm_hcd->hw_if->bdata;
+
+	if (bdata->irq_gpio >= 0)
+		syna_tcm_set_gpio(tcm_hcd, bdata->irq_gpio, false, 0, 0);
+
+	if (bdata->power_gpio >= 0)
+		syna_tcm_set_gpio(tcm_hcd, bdata->power_gpio, false, 0, 0);
+
+	if (bdata->reset_gpio >= 0)
+		syna_tcm_set_gpio(tcm_hcd, bdata->reset_gpio, false, 0, 0);
+
+	return retval;
+}
+
+
+/*
+ * syna_tcm_remove() - platform remove: tear down everything probe built
+ * @pdev: platform device
+ *
+ * Removes resident modules, stops work queues and the notifier kthread,
+ * unregisters the panel/FB notifier, deletes sysfs nodes, releases GPIOs,
+ * regulators, wakeup capability and message buffers, then frees tcm_hcd.
+ *
+ * Return: always 0.
+ */
+static int syna_tcm_remove(struct platform_device *pdev)
+{
+	int idx;
+	struct syna_tcm_module_handler *mod_handler;
+	struct syna_tcm_module_handler *tmp_handler;
+	struct syna_tcm_hcd *tcm_hcd = platform_get_drvdata(pdev);
+	const struct syna_tcm_board_data *bdata = tcm_hcd->hw_if->bdata;
+	struct drm_panel *active_panel = tcm_get_panel();
+
+	mutex_lock(&mod_pool.mutex);
+
+	/* _safe variant: entries are deleted while walking the list */
+	if (!list_empty(&mod_pool.list)) {
+		list_for_each_entry_safe(mod_handler, tmp_handler,
+				&mod_pool.list, link) {
+			if (mod_handler->mod_cb->remove)
+				mod_handler->mod_cb->remove(tcm_hcd);
+			list_del(&mod_handler->link);
+			kfree(mod_handler);
+		}
+	}
+
+	mod_pool.queue_work = false;
+	cancel_work_sync(&mod_pool.work);
+	flush_workqueue(mod_pool.workqueue);
+	destroy_workqueue(mod_pool.workqueue);
+
+	mutex_unlock(&mod_pool.mutex);
+
+	if (tcm_hcd->irq_enabled && bdata->irq_gpio >= 0) {
+		disable_irq(tcm_hcd->irq);
+		free_irq(tcm_hcd->irq, tcm_hcd);
+	}
+
+	cancel_delayed_work_sync(&tcm_hcd->polling_work);
+	flush_workqueue(tcm_hcd->polling_workqueue);
+	destroy_workqueue(tcm_hcd->polling_workqueue);
+
+	cancel_delayed_work_sync(&tcm_hcd->watchdog.work);
+	flush_workqueue(tcm_hcd->watchdog.workqueue);
+	destroy_workqueue(tcm_hcd->watchdog.workqueue);
+
+	cancel_work_sync(&tcm_hcd->helper.work);
+	flush_workqueue(tcm_hcd->helper.workqueue);
+	destroy_workqueue(tcm_hcd->helper.workqueue);
+
+	kthread_stop(tcm_hcd->notifier_thread);
+
+#ifdef CONFIG_DRM
+	if (active_panel)
+		drm_panel_notifier_unregister(active_panel,
+				&tcm_hcd->fb_notifier);
+#elif CONFIG_FB
+	fb_unregister_client(&tcm_hcd->fb_notifier);
+#endif
+
+	for (idx = 0; idx < ARRAY_SIZE(dynamic_config_attrs); idx++) {
+		sysfs_remove_file(tcm_hcd->dynamnic_config_sysfs_dir,
+				&(*dynamic_config_attrs[idx]).attr);
+	}
+
+	kobject_put(tcm_hcd->dynamnic_config_sysfs_dir);
+
+	for (idx = 0; idx < ARRAY_SIZE(attrs); idx++)
+		sysfs_remove_file(tcm_hcd->sysfs_dir, &(*attrs[idx]).attr);
+
+	kobject_put(tcm_hcd->sysfs_dir);
+
+	if (bdata->irq_gpio >= 0)
+		syna_tcm_set_gpio(tcm_hcd, bdata->irq_gpio, false, 0, 0);
+
+	if (bdata->power_gpio >= 0)
+		syna_tcm_set_gpio(tcm_hcd, bdata->power_gpio, false, 0, 0);
+
+	if (bdata->reset_gpio >= 0)
+		syna_tcm_set_gpio(tcm_hcd, bdata->reset_gpio, false, 0, 0);
+
+	syna_tcm_enable_regulator(tcm_hcd, false);
+
+	syna_tcm_get_regulator(tcm_hcd, false);
+
+	device_init_wakeup(&pdev->dev, 0);
+
+	RELEASE_BUFFER(tcm_hcd->report.buffer);
+	RELEASE_BUFFER(tcm_hcd->config);
+	RELEASE_BUFFER(tcm_hcd->temp);
+	RELEASE_BUFFER(tcm_hcd->resp);
+	RELEASE_BUFFER(tcm_hcd->out);
+	RELEASE_BUFFER(tcm_hcd->in);
+
+	kfree(tcm_hcd);
+
+	return 0;
+}
+
+/* shutdown reuses the full remove path to quiesce the hardware */
+static void syna_tcm_shutdown(struct platform_device *pdev)
+{
+	syna_tcm_remove(pdev);
+}
+
+#ifdef CONFIG_PM
+/*
+ * PM suspend/resume are wired only when neither DRM nor FB notifiers are
+ * available; otherwise suspend/resume are driven by the panel/FB events.
+ */
+static const struct dev_pm_ops syna_tcm_dev_pm_ops = {
+#if !defined(CONFIG_DRM) && !defined(CONFIG_FB)
+	.suspend = syna_tcm_suspend,
+	.resume = syna_tcm_resume,
+#endif
+};
+#endif
+
+static struct platform_driver syna_tcm_driver = {
+	.driver = {
+		.name = PLATFORM_DRIVER_NAME,
+		.owner = THIS_MODULE,
+#ifdef CONFIG_PM
+		.pm = &syna_tcm_dev_pm_ops,
+#endif
+	},
+	.probe = syna_tcm_probe,
+	.remove = syna_tcm_remove,
+	.shutdown = syna_tcm_shutdown,
+};
+
+/*
+ * Module entry: register the bus layer (I2C/SPI glue) first, then the
+ * platform driver.  Bus init failure aborts registration.
+ */
+static int __init syna_tcm_module_init(void)
+{
+	int retval;
+
+	retval = syna_tcm_bus_init();
+	if (retval < 0)
+		return retval;
+
+	return platform_driver_register(&syna_tcm_driver);
+}
+
+/* Module exit: unregister in reverse order of registration. */
+static void __exit syna_tcm_module_exit(void)
+{
+	platform_driver_unregister(&syna_tcm_driver);
+
+	syna_tcm_bus_exit();
+}
+
+/* late_initcall: the bus and panel providers must be up before we probe */
+late_initcall(syna_tcm_module_init);
+module_exit(syna_tcm_module_exit);
+
+MODULE_AUTHOR("Synaptics, Inc.");
+MODULE_DESCRIPTION("Synaptics TCM Touch Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/touchscreen/synaptics_tcm/synaptics_tcm_core.h b/drivers/input/touchscreen/synaptics_tcm/synaptics_tcm_core.h
new file mode 100644
index 0000000..9bbccc7
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_tcm/synaptics_tcm_core.h
@@ -0,0 +1,680 @@
+/*
+ * Synaptics TCM touchscreen driver
+ *
+ * Copyright (C) 2017-2019 Synaptics Incorporated. All rights reserved.
+ *
+ * Copyright (C) 2017-2019 Scott Lin <scott.lin@tw.synaptics.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * INFORMATION CONTAINED IN THIS DOCUMENT IS PROVIDED "AS-IS," AND SYNAPTICS
+ * EXPRESSLY DISCLAIMS ALL EXPRESS AND IMPLIED WARRANTIES, INCLUDING ANY
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE,
+ * AND ANY WARRANTIES OF NON-INFRINGEMENT OF ANY INTELLECTUAL PROPERTY RIGHTS.
+ * IN NO EVENT SHALL SYNAPTICS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, PUNITIVE, OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OF THE INFORMATION CONTAINED IN THIS DOCUMENT, HOWEVER CAUSED
+ * AND BASED ON ANY THEORY OF LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHER TORTIOUS ACTION, AND EVEN IF SYNAPTICS WAS ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE. IF A TRIBUNAL OF COMPETENT JURISDICTION DOES
+ * NOT PERMIT THE DISCLAIMER OF DIRECT DAMAGES OR ANY OTHER DAMAGES, SYNAPTICS'
+ * TOTAL CUMULATIVE LIABILITY TO ANY PARTY SHALL NOT EXCEED ONE HUNDRED U.S.
+ * DOLLARS.
+ */
+
+#ifndef _SYNAPTICS_TCM_CORE_H_
+#define _SYNAPTICS_TCM_CORE_H_
+
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/input.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/input/synaptics_tcm.h>
+#ifdef CONFIG_DRM
+#include <drm/drm_panel.h>
+#elif defined(CONFIG_FB)
+/* use defined() so an empty-defined CONFIG_FB cannot break the #elif */
+#include <linux/fb.h>
+#include <linux/notifier.h>
+#endif
+#include <uapi/linux/sched/types.h>
+
+#define SYNAPTICS_TCM_ID_PRODUCT (1 << 0)
+#define SYNAPTICS_TCM_ID_VERSION 0x0101
+#define SYNAPTICS_TCM_ID_SUBVERSION 0
+
+#define PLATFORM_DRIVER_NAME "synaptics_tcm"
+
+#define TOUCH_INPUT_NAME "synaptics_tcm_touch"
+#define TOUCH_INPUT_PHYS_PATH "synaptics_tcm/touch_input"
+
+/* #define WAKEUP_GESTURE */
+
+#define RD_CHUNK_SIZE 0 /* read length limit in bytes, 0 = unlimited */
+#define WR_CHUNK_SIZE 0 /* write length limit in bytes, 0 = unlimited */
+
+#define MESSAGE_HEADER_SIZE 4
+#define MESSAGE_MARKER 0xa5
+#define MESSAGE_PADDING 0x5a
+
+#define LOGx(func, dev, log, ...) \
+	func(dev, "%s: " log, __func__, ##__VA_ARGS__)
+
+#define LOGy(func, dev, log, ...) \
+	func(dev, "%s (line %d): " log, __func__, __LINE__, ##__VA_ARGS__)
+
+#define LOGD(dev, log, ...) LOGx(dev_dbg, dev, log, ##__VA_ARGS__)
+#define LOGI(dev, log, ...) LOGx(dev_info, dev, log, ##__VA_ARGS__)
+#define LOGN(dev, log, ...) LOGx(dev_notice, dev, log, ##__VA_ARGS__)
+#define LOGW(dev, log, ...) LOGy(dev_warn, dev, log, ##__VA_ARGS__)
+#define LOGE(dev, log, ...) LOGy(dev_err, dev, log, ##__VA_ARGS__)
+
+#define INIT_BUFFER(buffer, is_clone) \
+	mutex_init(&buffer.buf_mutex); \
+	buffer.clone = is_clone
+
+#define LOCK_BUFFER(buffer) \
+	mutex_lock(&buffer.buf_mutex)
+
+#define UNLOCK_BUFFER(buffer) \
+	mutex_unlock(&buffer.buf_mutex)
+
+#define RELEASE_BUFFER(buffer) \
+	do { \
+		if (buffer.clone == false) { \
+			kfree(buffer.buf); \
+			buffer.buf_size = 0; \
+			buffer.data_length = 0; \
+		} \
+	} while (0)
+
+#define MAX(a, b) \
+	({__typeof__(a) _a = (a); \
+	__typeof__(b) _b = (b); \
+	_a > _b ? _a : _b; })
+
+#define MIN(a, b) \
+	({__typeof__(a) _a = (a); \
+	__typeof__(b) _b = (b); \
+	_a < _b ? _a : _b; })
+
+#define STR(x) #x
+
+#define CONCAT(a, b) a##b
+
+#define SHOW_PROTOTYPE(m_name, a_name) \
+static ssize_t CONCAT(m_name##_sysfs, _##a_name##_show)(struct device *dev, \
+		struct device_attribute *attr, char *buf); \
+\
+static struct device_attribute dev_attr_##a_name = \
+		__ATTR(a_name, 0444, \
+		CONCAT(m_name##_sysfs, _##a_name##_show), \
+		syna_tcm_store_error)
+
+#define STORE_PROTOTYPE(m_name, a_name) \
+static ssize_t CONCAT(m_name##_sysfs, _##a_name##_store)(struct device *dev, \
+		struct device_attribute *attr, const char *buf, size_t count); \
+\
+static struct device_attribute dev_attr_##a_name = \
+		__ATTR(a_name, 0220, \
+		syna_tcm_show_error, \
+		CONCAT(m_name##_sysfs, _##a_name##_store))
+
+#define SHOW_STORE_PROTOTYPE(m_name, a_name) \
+static ssize_t CONCAT(m_name##_sysfs, _##a_name##_show)(struct device *dev, \
+		struct device_attribute *attr, char *buf); \
+\
+static ssize_t CONCAT(m_name##_sysfs, _##a_name##_store)(struct device *dev, \
+		struct device_attribute *attr, const char *buf, size_t count); \
+\
+static struct device_attribute dev_attr_##a_name = \
+		__ATTR(a_name, 0664, \
+		CONCAT(m_name##_sysfs, _##a_name##_show), \
+		CONCAT(m_name##_sysfs, _##a_name##_store))
+
+#define ATTRIFY(a_name) (&dev_attr_##a_name)
+
+#define PINCTRL_STATE_ACTIVE    "pmx_ts_active"
+#define PINCTRL_STATE_SUSPEND   "pmx_ts_suspend"
+#define PINCTRL_STATE_RELEASE   "pmx_ts_release"
+
+enum module_type {
+	TCM_TOUCH = 0,
+	TCM_DEVICE = 1,
+	TCM_TESTING = 2,
+	TCM_REFLASH = 3,
+	TCM_RECOVERY = 4,
+	TCM_ZEROFLASH = 5,
+	TCM_DIAGNOSTICS = 6,
+	TCM_LAST,
+};
+
+enum boot_mode {
+	MODE_APPLICATION = 0x01,
+	MODE_HOST_DOWNLOAD = 0x02,
+	MODE_BOOTLOADER = 0x0b,
+	MODE_TDDI_BOOTLOADER = 0x0c,
+	MODE_PRODUCTION_TEST = 0x0e,
+};
+
+enum boot_status {
+	BOOT_STATUS_OK = 0x00,
+	BOOT_STATUS_BOOTING = 0x01,
+	BOOT_STATUS_APP_BAD_DISPLAY_CRC = 0xfc,
+	BOOT_STATUS_BAD_DISPLAY_CONFIG = 0xfd,
+	BOOT_STATUS_BAD_APP_FIRMWARE = 0xfe,
+	BOOT_STATUS_WARM_BOOT = 0xff,
+};
+
+enum app_status {
+	APP_STATUS_OK = 0x00,
+	APP_STATUS_BOOTING = 0x01,
+	APP_STATUS_UPDATING = 0x02,
+	APP_STATUS_BAD_APP_CONFIG = 0xff,
+};
+
+enum firmware_mode {
+	FW_MODE_BOOTLOADER = 0,
+	FW_MODE_APPLICATION = 1,
+	FW_MODE_PRODUCTION_TEST = 2,
+};
+
+enum dynamic_config_id {
+	DC_UNKNOWN = 0x00,
+	DC_NO_DOZE,
+	DC_DISABLE_NOISE_MITIGATION,
+	DC_INHIBIT_FREQUENCY_SHIFT,
+	DC_REQUESTED_FREQUENCY,
+	DC_DISABLE_HSYNC,
+	DC_REZERO_ON_EXIT_DEEP_SLEEP,
+	DC_CHARGER_CONNECTED,
+	DC_NO_BASELINE_RELAXATION,
+	DC_IN_WAKEUP_GESTURE_MODE,
+	DC_STIMULUS_FINGERS,
+	DC_GRIP_SUPPRESSION_ENABLED,
+	DC_ENABLE_THICK_GLOVE,
+	DC_ENABLE_GLOVE,
+};
+
+enum command {
+	CMD_NONE = 0x00,
+	CMD_CONTINUE_WRITE = 0x01,
+	CMD_IDENTIFY = 0x02,
+	CMD_RESET = 0x04,
+	CMD_ENABLE_REPORT = 0x05,
+	CMD_DISABLE_REPORT = 0x06,
+	CMD_GET_BOOT_INFO = 0x10,
+	CMD_ERASE_FLASH = 0x11,
+	CMD_WRITE_FLASH = 0x12,
+	CMD_READ_FLASH = 0x13,
+	CMD_RUN_APPLICATION_FIRMWARE = 0x14,
+	CMD_SPI_MASTER_WRITE_THEN_READ = 0x15,
+	CMD_REBOOT_TO_ROM_BOOTLOADER = 0x16,
+	CMD_RUN_BOOTLOADER_FIRMWARE = 0x1f,
+	CMD_GET_APPLICATION_INFO = 0x20,
+	CMD_GET_STATIC_CONFIG = 0x21,
+	CMD_SET_STATIC_CONFIG = 0x22,
+	CMD_GET_DYNAMIC_CONFIG = 0x23,
+	CMD_SET_DYNAMIC_CONFIG = 0x24,
+	CMD_GET_TOUCH_REPORT_CONFIG = 0x25,
+	CMD_SET_TOUCH_REPORT_CONFIG = 0x26,
+	CMD_REZERO = 0x27,
+	CMD_COMMIT_CONFIG = 0x28,
+	CMD_DESCRIBE_DYNAMIC_CONFIG = 0x29,
+	CMD_PRODUCTION_TEST = 0x2a,
+	CMD_SET_CONFIG_ID = 0x2b,
+	CMD_ENTER_DEEP_SLEEP = 0x2c,
+	CMD_EXIT_DEEP_SLEEP = 0x2d,
+	CMD_GET_TOUCH_INFO = 0x2e,
+	CMD_GET_DATA_LOCATION = 0x2f,
+	CMD_DOWNLOAD_CONFIG = 0x30,
+	CMD_ENTER_PRODUCTION_TEST_MODE = 0x31,
+	CMD_GET_FEATURES = 0x32,
+};
+
+enum status_code {
+	STATUS_IDLE = 0x00,
+	STATUS_OK = 0x01,
+	STATUS_BUSY = 0x02,
+	STATUS_CONTINUED_READ = 0x03,
+	STATUS_NOT_EXECUTED_IN_DEEP_SLEEP = 0x0b,
+	STATUS_RECEIVE_BUFFER_OVERFLOW = 0x0c,
+	STATUS_PREVIOUS_COMMAND_PENDING = 0x0d,
+	STATUS_NOT_IMPLEMENTED = 0x0e,
+	STATUS_ERROR = 0x0f,
+	STATUS_INVALID = 0xff,
+};
+
+enum report_type {
+	REPORT_IDENTIFY = 0x10,
+	REPORT_TOUCH = 0x11,
+	REPORT_DELTA = 0x12,
+	REPORT_RAW = 0x13,
+	REPORT_STATUS = 0x1b,
+	REPORT_PRINTF = 0x82,
+	REPORT_HDL = 0xfe,
+};
+
+enum command_status {
+	CMD_IDLE = 0,
+	CMD_BUSY = 1,
+	CMD_ERROR = -1,
+};
+
+enum flash_area {
+	BOOTLOADER = 0,
+	BOOT_CONFIG,
+	APP_FIRMWARE,
+	APP_CONFIG,
+	DISP_CONFIG,
+	CUSTOM_OTP,
+	CUSTOM_LCM,
+	CUSTOM_OEM,
+	PPDT,
+};
+
+enum flash_data {
+	LCM_DATA = 1,
+	OEM_DATA,
+	PPDT_DATA,
+};
+
+enum helper_task {
+	HELP_NONE = 0,
+	HELP_RUN_APPLICATION_FIRMWARE,
+	HELP_SEND_RESET_NOTIFICATION,
+};
+
+struct syna_tcm_helper {
+	atomic_t task;
+	struct work_struct work;
+	struct workqueue_struct *workqueue;
+};
+
+struct syna_tcm_watchdog {
+	bool run;
+	unsigned char count;
+	struct delayed_work work;
+	struct workqueue_struct *workqueue;
+};
+
+struct syna_tcm_buffer {
+	bool clone;
+	unsigned char *buf;
+	unsigned int buf_size;
+	unsigned int data_length;
+	struct mutex buf_mutex;
+};
+
+struct syna_tcm_report {
+	unsigned char id;
+	struct syna_tcm_buffer buffer;
+};
+
+struct syna_tcm_identification {
+	unsigned char version;
+	unsigned char mode;
+	unsigned char part_number[16];
+	unsigned char build_id[4];
+	unsigned char max_write_size[2];
+};
+
+struct syna_tcm_boot_info {
+	unsigned char version;
+	unsigned char status;
+	unsigned char asic_id[2];
+	unsigned char write_block_size_words;
+	unsigned char erase_page_size_words[2];
+	unsigned char max_write_payload_size[2];
+	unsigned char last_reset_reason;
+	unsigned char pc_at_time_of_last_reset[2];
+	unsigned char boot_config_start_block[2];
+	unsigned char boot_config_size_blocks[2];
+	unsigned char display_config_start_block[4];
+	unsigned char display_config_length_blocks[2];
+	unsigned char backup_display_config_start_block[4];
+	unsigned char backup_display_config_length_blocks[2];
+	unsigned char custom_otp_start_block[2];
+	unsigned char custom_otp_length_blocks[2];
+};
+
+struct syna_tcm_app_info {
+	unsigned char version[2];
+	unsigned char status[2];
+	unsigned char static_config_size[2];
+	unsigned char dynamic_config_size[2];
+	unsigned char app_config_start_write_block[2];
+	unsigned char app_config_size[2];
+	unsigned char max_touch_report_config_size[2];
+	unsigned char max_touch_report_payload_size[2];
+	unsigned char customer_config_id[16];
+	unsigned char max_x[2];
+	unsigned char max_y[2];
+	unsigned char max_objects[2];
+	unsigned char num_of_buttons[2];
+	unsigned char num_of_image_rows[2];
+	unsigned char num_of_image_cols[2];
+	unsigned char has_hybrid_data[2];
+};
+
+struct syna_tcm_touch_info {
+	unsigned char image_2d_scale_factor[4];
+	unsigned char image_0d_scale_factor[4];
+	unsigned char hybrid_x_scale_factor[4];
+	unsigned char hybrid_y_scale_factor[4];
+};
+
+struct syna_tcm_message_header {
+	unsigned char marker;
+	unsigned char code;
+	unsigned char length[2];
+};
+
+struct syna_tcm_features {
+	unsigned char byte_0_reserved;
+	unsigned char byte_1_reserved;
+	unsigned char dual_firmware:1;
+	unsigned char byte_2_reserved:7;
+} __packed;
+
+struct syna_tcm_hcd {
+	pid_t isr_pid;
+	atomic_t command_status;
+	atomic_t host_downloading;
+	atomic_t firmware_flashing;
+	wait_queue_head_t hdl_wq;
+	wait_queue_head_t reflash_wq;
+	int irq;
+	bool init_okay;
+	bool do_polling;
+	bool in_suspend;
+	bool irq_enabled;
+	bool host_download_mode;
+	unsigned char marker;
+	unsigned char fb_ready;
+	unsigned char command;
+	unsigned char async_report_id;
+	unsigned char status_report_code;
+	unsigned char response_code;
+	unsigned int read_length;
+	unsigned int payload_length;
+	unsigned int packrat_number;
+	unsigned int rd_chunk_size;
+	unsigned int wr_chunk_size;
+	unsigned int app_status;
+	struct platform_device *pdev;
+	struct regulator *pwr_reg;
+	struct regulator *bus_reg;
+	struct kobject *sysfs_dir;
+	struct kobject *dynamnic_config_sysfs_dir;
+	struct mutex extif_mutex;
+	struct mutex reset_mutex;
+	struct mutex irq_en_mutex;
+	struct mutex io_ctrl_mutex;
+	struct mutex rw_ctrl_mutex;
+	struct mutex command_mutex;
+	struct mutex identify_mutex;
+	struct delayed_work polling_work;
+	struct workqueue_struct *polling_workqueue;
+	struct task_struct *notifier_thread;
+	struct pinctrl *ts_pinctrl;
+	struct pinctrl_state *pinctrl_state_active;
+	struct pinctrl_state *pinctrl_state_suspend;
+	struct pinctrl_state *pinctrl_state_release;
+#if defined(CONFIG_DRM) || defined(CONFIG_FB)
+	struct notifier_block fb_notifier;
+#endif
+	struct syna_tcm_buffer in;
+	struct syna_tcm_buffer out;
+	struct syna_tcm_buffer resp;
+	struct syna_tcm_buffer temp;
+	struct syna_tcm_buffer config;
+	struct syna_tcm_report report;
+	struct syna_tcm_app_info app_info;
+	struct syna_tcm_boot_info boot_info;
+	struct syna_tcm_touch_info touch_info;
+	struct syna_tcm_identification id_info;
+	struct syna_tcm_helper helper;
+	struct syna_tcm_watchdog watchdog;
+	struct syna_tcm_features features;
+	const struct syna_tcm_hw_interface *hw_if;
+	int (*reset)(struct syna_tcm_hcd *tcm_hcd, bool hw, bool update_wd);
+	int (*sleep)(struct syna_tcm_hcd *tcm_hcd, bool en);
+	int (*identify)(struct syna_tcm_hcd *tcm_hcd, bool id);
+	int (*enable_irq)(struct syna_tcm_hcd *tcm_hcd, bool en, bool ns);
+	int (*switch_mode)(struct syna_tcm_hcd *tcm_hcd,
+			enum firmware_mode mode);
+	int (*read_message)(struct syna_tcm_hcd *tcm_hcd,
+			unsigned char *in_buf, unsigned int length);
+	int (*write_message)(struct syna_tcm_hcd *tcm_hcd,
+			unsigned char command, unsigned char *payload,
+			unsigned int length, unsigned char **resp_buf,
+			unsigned int *resp_buf_size, unsigned int *resp_length,
+			unsigned char *response_code,
+			unsigned int polling_delay_ms);
+	int (*get_dynamic_config)(struct syna_tcm_hcd *tcm_hcd,
+			enum dynamic_config_id id, unsigned short *value);
+	int (*set_dynamic_config)(struct syna_tcm_hcd *tcm_hcd,
+			enum dynamic_config_id id, unsigned short value);
+	int (*get_data_location)(struct syna_tcm_hcd *tcm_hcd,
+			enum flash_area area, unsigned int *addr,
+			unsigned int *length);
+	int (*read_flash_data)(enum flash_area area, bool run_app_firmware,
+			struct syna_tcm_buffer *output);
+	void (*report_touch)(void);
+	void (*update_watchdog)(struct syna_tcm_hcd *tcm_hcd, bool en);
+};
+
+struct syna_tcm_module_cb {
+	enum module_type type;
+	int (*init)(struct syna_tcm_hcd *tcm_hcd);
+	int (*remove)(struct syna_tcm_hcd *tcm_hcd);
+	int (*syncbox)(struct syna_tcm_hcd *tcm_hcd);
+	int (*asyncbox)(struct syna_tcm_hcd *tcm_hcd);
+	int (*reset)(struct syna_tcm_hcd *tcm_hcd);
+	int (*suspend)(struct syna_tcm_hcd *tcm_hcd);
+	int (*resume)(struct syna_tcm_hcd *tcm_hcd);
+	int (*early_suspend)(struct syna_tcm_hcd *tcm_hcd);
+};
+
+struct syna_tcm_module_handler {
+	bool insert;
+	bool detach;
+	struct list_head link;
+	struct syna_tcm_module_cb *mod_cb;
+};
+
+struct syna_tcm_module_pool {
+	bool initialized;
+	bool queue_work;
+	bool reconstructing;
+	struct mutex mutex;
+	struct list_head list;
+	struct work_struct work;
+	struct workqueue_struct *workqueue;
+	struct syna_tcm_hcd *tcm_hcd;
+};
+
+struct syna_tcm_bus_io {
+	unsigned char type;
+	int (*rmi_read)(struct syna_tcm_hcd *tcm_hcd, unsigned short addr,
+			unsigned char *data, unsigned int length);
+	int (*rmi_write)(struct syna_tcm_hcd *tcm_hcd, unsigned short addr,
+			unsigned char *data, unsigned int length);
+	int (*read)(struct syna_tcm_hcd *tcm_hcd, unsigned char *data,
+			unsigned int length);
+	int (*write)(struct syna_tcm_hcd *tcm_hcd, unsigned char *data,
+			unsigned int length);
+};
+
+struct syna_tcm_hw_interface {
+	struct syna_tcm_board_data *bdata;
+	const struct syna_tcm_bus_io *bus_io;
+};
+
+struct drm_panel *tcm_get_panel(void);
+
+int syna_tcm_bus_init(void);
+
+void syna_tcm_bus_exit(void);
+
+int syna_tcm_add_module(struct syna_tcm_module_cb *mod_cb, bool insert);
+
+/* Thin convenience wrappers forwarding to the bound bus_io operations. */
+
+/* RMI-addressed register read via the underlying bus implementation. */
+static inline int syna_tcm_rmi_read(struct syna_tcm_hcd *tcm_hcd,
+		unsigned short addr, unsigned char *data, unsigned int length)
+{
+	return tcm_hcd->hw_if->bus_io->rmi_read(tcm_hcd, addr, data, length);
+}
+
+/* RMI-addressed register write via the underlying bus implementation. */
+static inline int syna_tcm_rmi_write(struct syna_tcm_hcd *tcm_hcd,
+		unsigned short addr, unsigned char *data, unsigned int length)
+{
+	return tcm_hcd->hw_if->bus_io->rmi_write(tcm_hcd, addr, data, length);
+}
+
+/* Raw bus read of length bytes into data. */
+static inline int syna_tcm_read(struct syna_tcm_hcd *tcm_hcd,
+		unsigned char *data, unsigned int length)
+{
+	return tcm_hcd->hw_if->bus_io->read(tcm_hcd, data, length);
+}
+
+/* Raw bus write of length bytes from data. */
+static inline int syna_tcm_write(struct syna_tcm_hcd *tcm_hcd,
+		unsigned char *data, unsigned int length)
+{
+	return tcm_hcd->hw_if->bus_io->write(tcm_hcd, data, length);
+}
+
+/* Stub show handler for write-only sysfs attributes: always -EPERM. */
+static inline ssize_t syna_tcm_show_error(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	pr_err("%s: Attribute not readable\n",
+			__func__);
+
+	return -EPERM;
+}
+
+/* Stub store handler for read-only sysfs attributes: always -EPERM. */
+static inline ssize_t syna_tcm_store_error(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	pr_err("%s: Attribute not writable\n",
+			__func__);
+
+	return -EPERM;
+}
+
+/*
+ * Bounds-checked memcpy: copy count bytes from src to dest only when
+ * count fits within both dest_size and src_size.
+ * Returns 0 on success, -EINVAL on NULL pointers or an out-of-range count.
+ */
+static inline int secure_memcpy(unsigned char *dest, unsigned int dest_size,
+		const unsigned char *src, unsigned int src_size,
+		unsigned int count)
+{
+	if (dest == NULL || src == NULL)
+		return -EINVAL;
+
+	if (count > dest_size || count > src_size) {
+		pr_err("%s: src_size = %d, dest_size = %d, count = %d\n",
+				__func__, src_size, dest_size, count);
+		return -EINVAL;
+	}
+
+	memcpy((void *)dest, (const void *)src, count);
+
+	return 0;
+}
+
+/*
+ * Grow buffer to at least size bytes while preserving its contents.
+ * No-op if the buffer is already large enough.  On any failure the old
+ * storage is released and buffer->buf is reset to NULL so a later
+ * kfree()/RELEASE_BUFFER cannot double-free a stale pointer.
+ * Returns 0 on success or a negative errno.
+ */
+static inline int syna_tcm_realloc_mem(struct syna_tcm_hcd *tcm_hcd,
+		struct syna_tcm_buffer *buffer, unsigned int size)
+{
+	int retval;
+	unsigned char *temp;
+
+	if (size > buffer->buf_size) {
+		temp = buffer->buf;
+
+		buffer->buf = kmalloc(size, GFP_KERNEL);
+		if (!(buffer->buf)) {
+			dev_err(tcm_hcd->pdev->dev.parent,
+					"%s: Failed to allocate memory\n",
+					__func__);
+			kfree(temp);
+			buffer->buf_size = 0;
+			buffer->data_length = 0;
+			return -ENOMEM;
+		}
+
+		retval = secure_memcpy(buffer->buf,
+				size,
+				temp,
+				buffer->buf_size,
+				buffer->buf_size);
+		if (retval < 0) {
+			dev_err(tcm_hcd->pdev->dev.parent,
+					"%s: Failed to copy data\n",
+					__func__);
+			kfree(temp);
+			kfree(buffer->buf);
+			/* clear the dangling pointer to prevent double free */
+			buffer->buf = NULL;
+			buffer->buf_size = 0;
+			buffer->data_length = 0;
+			return retval;
+		}
+
+		kfree(temp);
+		buffer->buf_size = size;
+	}
+
+	return 0;
+}
+
+/*
+ * Ensure buffer holds at least size bytes, reallocating if needed.
+ * The buffer is zeroed to its full capacity and data_length is reset,
+ * so any previous contents are discarded (unlike syna_tcm_realloc_mem).
+ * Returns 0 on success or -ENOMEM.
+ */
+static inline int syna_tcm_alloc_mem(struct syna_tcm_hcd *tcm_hcd,
+		struct syna_tcm_buffer *buffer, unsigned int size)
+{
+	if (size > buffer->buf_size) {
+		kfree(buffer->buf);
+		buffer->buf = kmalloc(size, GFP_KERNEL);
+		if (!(buffer->buf)) {
+			dev_err(tcm_hcd->pdev->dev.parent,
+					"%s: Failed to allocate memory\n",
+					__func__);
+			dev_err(tcm_hcd->pdev->dev.parent,
+					"%s: Allocation size = %d\n",
+					__func__, size);
+			buffer->buf_size = 0;
+			buffer->data_length = 0;
+			return -ENOMEM;
+		}
+		buffer->buf_size = size;
+	}
+
+	/* wipe the whole capacity, not just size, for a clean slate */
+	memset(buffer->buf, 0x00, buffer->buf_size);
+	buffer->data_length = 0;
+
+	return 0;
+}
+
+/* Decode a 2-byte little-endian value into a host unsigned int. */
+static inline unsigned int le2_to_uint(const unsigned char *src)
+{
+	return (unsigned int)src[0] +
+			(unsigned int)src[1] * 0x100;
+}
+
+/* Decode a 4-byte little-endian value into a host unsigned int. */
+static inline unsigned int le4_to_uint(const unsigned char *src)
+{
+	return (unsigned int)src[0] +
+			(unsigned int)src[1] * 0x100 +
+			(unsigned int)src[2] * 0x10000 +
+			(unsigned int)src[3] * 0x1000000;
+}
+
+/* Integer division rounding up; divisor must be nonzero. */
+static inline unsigned int ceil_div(unsigned int dividend,
+		unsigned int divisor)
+{
+	return (dividend + divisor - 1) / divisor;
+}
+
+#endif
diff --git a/drivers/input/touchscreen/synaptics_tcm/synaptics_tcm_device.c b/drivers/input/touchscreen/synaptics_tcm/synaptics_tcm_device.c
new file mode 100644
index 0000000..963e299
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_tcm/synaptics_tcm_device.c
@@ -0,0 +1,707 @@
+/*
+ * Synaptics TCM touchscreen driver
+ *
+ * Copyright (C) 2017-2019 Synaptics Incorporated. All rights reserved.
+ *
+ * Copyright (C) 2017-2019 Scott Lin <scott.lin@tw.synaptics.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * INFORMATION CONTAINED IN THIS DOCUMENT IS PROVIDED "AS-IS," AND SYNAPTICS
+ * EXPRESSLY DISCLAIMS ALL EXPRESS AND IMPLIED WARRANTIES, INCLUDING ANY
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE,
+ * AND ANY WARRANTIES OF NON-INFRINGEMENT OF ANY INTELLECTUAL PROPERTY RIGHTS.
+ * IN NO EVENT SHALL SYNAPTICS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, PUNITIVE, OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OF THE INFORMATION CONTAINED IN THIS DOCUMENT, HOWEVER CAUSED
+ * AND BASED ON ANY THEORY OF LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHER TORTIOUS ACTION, AND EVEN IF SYNAPTICS WAS ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE. IF A TRIBUNAL OF COMPETENT JURISDICTION DOES
+ * NOT PERMIT THE DISCLAIMER OF DIRECT DAMAGES OR ANY OTHER DAMAGES, SYNAPTICS'
+ * TOTAL CUMULATIVE LIABILITY TO ANY PARTY SHALL NOT EXCEED ONE HUNDRED U.S.
+ * DOLLARS.
+ */
+
+#include <linux/cdev.h>
+#include <linux/gpio.h>
+#include <linux/uaccess.h>
+#include "synaptics_tcm_core.h"
+
+#define CHAR_DEVICE_NAME "tcm"
+
+#define CONCURRENT true
+
+#define DEVICE_IOC_MAGIC 's'
+#define DEVICE_IOC_RESET _IO(DEVICE_IOC_MAGIC, 0) /* 0x00007300 */
+#define DEVICE_IOC_IRQ _IOW(DEVICE_IOC_MAGIC, 1, int) /* 0x40047301 */
+#define DEVICE_IOC_RAW _IOW(DEVICE_IOC_MAGIC, 2, int) /* 0x40047302 */
+#define DEVICE_IOC_CONCURRENT _IOW(DEVICE_IOC_MAGIC, 3, int) /* 0x40047303 */
+
+struct device_hcd {
+	dev_t dev_num;
+	bool raw_mode;
+	bool concurrent;
+	unsigned int ref_count;
+	struct cdev char_dev;
+	struct class *class;
+	struct device *device;
+	struct syna_tcm_buffer out;
+	struct syna_tcm_buffer resp;
+	struct syna_tcm_buffer report;
+	struct syna_tcm_hcd *tcm_hcd;
+};
+
+DECLARE_COMPLETION(device_remove_complete);
+
+static struct device_hcd *device_hcd;
+
+static int rmidev_major_num;
+
+/*
+ * Reassemble a touch report from raw-mode reads so it can be forwarded
+ * to the input subsystem while userspace also consumes raw data.
+ * Reports may span multiple reads: a REPORT_TOUCH frame announces the
+ * total payload length, and STATUS_CONTINUED_READ frames append to it.
+ * The static report/offset/remaining_size variables carry the
+ * reassembly state between successive calls.
+ */
+static void device_capture_touch_report(unsigned int count)
+{
+	int retval;
+	unsigned char id;
+	unsigned int idx;
+	unsigned int size;
+	unsigned char *data;
+	struct syna_tcm_hcd *tcm_hcd = device_hcd->tcm_hcd;
+	static bool report;
+	static unsigned int offset;
+	static unsigned int remaining_size;
+
+	if (count < 2)
+		return;
+
+	data = &device_hcd->resp.buf[0];
+
+	/* only frames starting with the TCM message marker are reports */
+	if (data[0] != MESSAGE_MARKER)
+		return;
+
+	id = data[1];
+
+	size = 0;
+
+	LOCK_BUFFER(device_hcd->report);
+
+	switch (id) {
+	case REPORT_TOUCH:
+		/* header bytes 2-3 carry the total payload length */
+		if (count >= 4) {
+			remaining_size = le2_to_uint(&data[2]);
+		} else {
+			report = false;
+			goto exit;
+		}
+		retval = syna_tcm_alloc_mem(tcm_hcd,
+				&device_hcd->report,
+				remaining_size);
+		if (retval < 0) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to allocate memory for report.buf\n");
+			report = false;
+			goto exit;
+		}
+		idx = 4;
+		size = count - idx;
+		offset = 0;
+		report = true;
+		break;
+	case STATUS_CONTINUED_READ:
+		/* continuation is only valid while a report is in flight */
+		if (report == false)
+			goto exit;
+		if (count >= 2) {
+			idx = 2;
+			size = count - idx;
+		}
+		break;
+	default:
+		goto exit;
+	}
+
+	if (size) {
+		/* never copy past the announced payload length */
+		size = MIN(size, remaining_size);
+		retval = secure_memcpy(&device_hcd->report.buf[offset],
+				device_hcd->report.buf_size - offset,
+				&data[idx],
+				count - idx,
+				size);
+		if (retval < 0) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+					"Failed to copy touch report data\n");
+			report = false;
+			goto exit;
+		} else {
+			offset += size;
+			remaining_size -= size;
+			device_hcd->report.data_length += size;
+		}
+	}
+
+	/* wait for more continuation frames until the payload is complete */
+	if (remaining_size)
+		goto exit;
+
+	LOCK_BUFFER(tcm_hcd->report.buffer);
+
+	/* hand the assembled report to the core as a clone (shared buf) */
+	tcm_hcd->report.buffer.buf = device_hcd->report.buf;
+	tcm_hcd->report.buffer.buf_size = device_hcd->report.buf_size;
+	tcm_hcd->report.buffer.data_length = device_hcd->report.data_length;
+
+	tcm_hcd->report_touch();
+
+	UNLOCK_BUFFER(tcm_hcd->report.buffer);
+
+	report = false;
+
+exit:
+	UNLOCK_BUFFER(device_hcd->report);
+}
+
+/*
+ * Capture the touch report config a userspace write just sent to the
+ * device, so the driver keeps its own copy in tcm_hcd->config.
+ * In raw mode the out buffer holds a full TCM message (command byte,
+ * 2-byte LE length, payload); otherwise byte 0 is the command and the
+ * rest of the write is the payload.
+ * Returns 0 on success or a negative errno.
+ */
+static int device_capture_touch_report_config(unsigned int count)
+{
+	int retval;
+	unsigned int size;
+	unsigned int buf_size;
+	unsigned char *data;
+	struct syna_tcm_hcd *tcm_hcd = device_hcd->tcm_hcd;
+
+	if (device_hcd->raw_mode) {
+		if (count < 3) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+					"Invalid write data\n");
+			return -EINVAL;
+		}
+
+		/* bytes 1-2 of the message carry the payload length */
+		size = le2_to_uint(&device_hcd->out.buf[1]);
+
+		if (count - 3 < size) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+					"Incomplete write data\n");
+			return -EINVAL;
+		}
+
+		if (!size)
+			return 0;
+
+		data = &device_hcd->out.buf[3];
+		buf_size = device_hcd->out.buf_size - 3;
+	} else {
+		/* non-raw: everything after the command byte is payload */
+		size = count - 1;
+
+		if (!size)
+			return 0;
+
+		data = &device_hcd->out.buf[1];
+		buf_size = device_hcd->out.buf_size - 1;
+	}
+
+	LOCK_BUFFER(tcm_hcd->config);
+
+	retval = syna_tcm_alloc_mem(tcm_hcd,
+			&tcm_hcd->config,
+			size);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+			"Failed to allocate memory for tcm_hcd->config.buf\n");
+		UNLOCK_BUFFER(tcm_hcd->config);
+		return retval;
+	}
+
+	retval = secure_memcpy(tcm_hcd->config.buf,
+			tcm_hcd->config.buf_size,
+			data,
+			buf_size,
+			size);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to copy touch report config data\n");
+		UNLOCK_BUFFER(tcm_hcd->config);
+		return retval;
+	}
+
+	tcm_hcd->config.data_length = size;
+
+	UNLOCK_BUFFER(tcm_hcd->config);
+
+	return 0;
+}
+
+/*
+ * Character-device ioctl handler, serialized by extif_mutex:
+ *   DEVICE_IOC_RESET      - soft-reset the controller
+ *   DEVICE_IOC_IRQ        - enable (arg=1) / disable (arg=0) the IRQ
+ *   DEVICE_IOC_RAW        - toggle raw mode; watchdog disabled in raw mode
+ *   DEVICE_IOC_CONCURRENT - toggle concurrent touch reporting
+ * Returns 0 or a negative errno; -ENOTTY for unknown commands.
+ */
+#ifdef HAVE_UNLOCKED_IOCTL
+static long device_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+#else
+static int device_ioctl(struct inode *inp, struct file *filp, unsigned int cmd,
+		unsigned long arg)
+#endif
+{
+	int retval;
+	struct syna_tcm_hcd *tcm_hcd = device_hcd->tcm_hcd;
+
+	mutex_lock(&tcm_hcd->extif_mutex);
+
+	retval = 0;
+
+	switch (cmd) {
+	case DEVICE_IOC_RESET:
+		retval = tcm_hcd->reset(tcm_hcd, false, true);
+		break;
+	case DEVICE_IOC_IRQ:
+		if (arg == 0)
+			retval = tcm_hcd->enable_irq(tcm_hcd, false, false);
+		else if (arg == 1)
+			/* ns is a bool; pass false, not the pointer constant NULL */
+			retval = tcm_hcd->enable_irq(tcm_hcd, true, false);
+		break;
+	case DEVICE_IOC_RAW:
+		if (arg == 0) {
+			device_hcd->raw_mode = false;
+			tcm_hcd->update_watchdog(tcm_hcd, true);
+		} else if (arg == 1) {
+			device_hcd->raw_mode = true;
+			tcm_hcd->update_watchdog(tcm_hcd, false);
+		}
+		break;
+	case DEVICE_IOC_CONCURRENT:
+		if (arg == 0)
+			device_hcd->concurrent = false;
+		else if (arg == 1)
+			device_hcd->concurrent = true;
+		break;
+	default:
+		retval = -ENOTTY;
+		break;
+	}
+
+	mutex_unlock(&tcm_hcd->extif_mutex);
+
+	return retval;
+}
+
+static loff_t device_llseek(struct file *filp, loff_t off, int whence)
+{
+	return -EINVAL;
+}
+
+/*
+ * Read handler for the char device.  In raw mode it performs a fresh
+ * bus read of exactly count bytes; otherwise it returns the response
+ * captured by the preceding write, whose length must match count.
+ * When concurrent reporting is on, raw reads are also scanned for
+ * touch reports to forward to the input subsystem.
+ * Returns bytes read or a negative errno.
+ */
+static ssize_t device_read(struct file *filp, char __user *buf,
+		size_t count, loff_t *f_pos)
+{
+	int retval;
+	struct syna_tcm_hcd *tcm_hcd = device_hcd->tcm_hcd;
+
+	if (count == 0)
+		return 0;
+
+	mutex_lock(&tcm_hcd->extif_mutex);
+
+	LOCK_BUFFER(device_hcd->resp);
+
+	if (device_hcd->raw_mode) {
+		retval = syna_tcm_alloc_mem(tcm_hcd,
+				&device_hcd->resp,
+				count);
+		if (retval < 0) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to allocate memory for resp.buf\n");
+			UNLOCK_BUFFER(device_hcd->resp);
+			goto exit;
+		}
+
+		retval = tcm_hcd->read_message(tcm_hcd,
+				device_hcd->resp.buf,
+				count);
+		if (retval < 0) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+					"Failed to read message\n");
+			UNLOCK_BUFFER(device_hcd->resp);
+			goto exit;
+		}
+	} else {
+		/* non-raw: caller must request exactly the staged response */
+		if (count != device_hcd->resp.data_length) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+					"Invalid length information\n");
+			UNLOCK_BUFFER(device_hcd->resp);
+			retval = -EINVAL;
+			goto exit;
+		}
+	}
+
+	if (copy_to_user(buf, device_hcd->resp.buf, count)) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to copy data to user space\n");
+		UNLOCK_BUFFER(device_hcd->resp);
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	if (!device_hcd->concurrent)
+		goto skip_concurrent;
+
+	if (tcm_hcd->report_touch == NULL) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Unable to report touch\n");
+		device_hcd->concurrent = false;
+	}
+
+	if (device_hcd->raw_mode)
+		device_capture_touch_report(count);
+
+skip_concurrent:
+	UNLOCK_BUFFER(device_hcd->resp);
+
+	retval = count;
+
+exit:
+	mutex_unlock(&tcm_hcd->extif_mutex);
+
+	return retval;
+}
+
+/*
+ * Write handler for the char device.  Byte 0 of the user buffer is the
+ * TCM command, the remainder is the payload.  In raw mode the message
+ * is sent without waiting for a response; otherwise the response is
+ * captured into device_hcd->resp under reset_mutex and its length is
+ * returned.  A write of CMD_SET_TOUCH_REPORT_CONFIG also updates the
+ * driver's cached report config.
+ * Returns bytes consumed (raw) / response length, or a negative errno.
+ */
+static ssize_t device_write(struct file *filp, const char __user *buf,
+		size_t count, loff_t *f_pos)
+{
+	int retval;
+	struct syna_tcm_hcd *tcm_hcd = device_hcd->tcm_hcd;
+
+	if (count == 0)
+		return 0;
+
+	mutex_lock(&tcm_hcd->extif_mutex);
+
+	LOCK_BUFFER(device_hcd->out);
+
+	/* allocate one spare byte so &out.buf[1] is valid for count == 1 */
+	retval = syna_tcm_alloc_mem(tcm_hcd,
+			&device_hcd->out,
+			count == 1 ? count + 1 : count);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+			"Failed to allocate memory for device_hcd->out.buf\n");
+		UNLOCK_BUFFER(device_hcd->out);
+		goto exit;
+	}
+
+	if (copy_from_user(device_hcd->out.buf, buf, count)) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+			"Failed to copy data from user space\n");
+		UNLOCK_BUFFER(device_hcd->out);
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	LOCK_BUFFER(device_hcd->resp);
+
+	if (device_hcd->raw_mode) {
+		/* fire-and-forget: response will be fetched via device_read */
+		retval = tcm_hcd->write_message(tcm_hcd,
+				device_hcd->out.buf[0],
+				&device_hcd->out.buf[1],
+				count - 1,
+				NULL,
+				NULL,
+				NULL,
+				NULL,
+				0);
+	} else {
+		mutex_lock(&tcm_hcd->reset_mutex);
+		retval = tcm_hcd->write_message(tcm_hcd,
+				device_hcd->out.buf[0],
+				&device_hcd->out.buf[1],
+				count - 1,
+				&device_hcd->resp.buf,
+				&device_hcd->resp.buf_size,
+				&device_hcd->resp.data_length,
+				NULL,
+				0);
+		mutex_unlock(&tcm_hcd->reset_mutex);
+	}
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+			"Failed to write command 0x%02x\n",
+			device_hcd->out.buf[0]);
+		UNLOCK_BUFFER(device_hcd->resp);
+		UNLOCK_BUFFER(device_hcd->out);
+		goto exit;
+	}
+
+	if (count && device_hcd->out.buf[0] == CMD_SET_TOUCH_REPORT_CONFIG) {
+		retval = device_capture_touch_report_config(count);
+		if (retval < 0) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to capture touch report config\n");
+		}
+	}
+
+	UNLOCK_BUFFER(device_hcd->out);
+
+	if (device_hcd->raw_mode)
+		retval = count;
+	else
+		retval = device_hcd->resp.data_length;
+
+	UNLOCK_BUFFER(device_hcd->resp);
+
+exit:
+	mutex_unlock(&tcm_hcd->extif_mutex);
+
+	return retval;
+}
+
+/*
+ * Open handler: allow only a single concurrent opener of the char
+ * device; additional opens fail with -EACCES until release.
+ */
+static int device_open(struct inode *inp, struct file *filp)
+{
+	int retval;
+	struct syna_tcm_hcd *tcm_hcd = device_hcd->tcm_hcd;
+
+	mutex_lock(&tcm_hcd->extif_mutex);
+
+	if (device_hcd->ref_count < 1) {
+		device_hcd->ref_count++;
+		retval = 0;
+	} else {
+		retval = -EACCES;
+	}
+
+	mutex_unlock(&tcm_hcd->extif_mutex);
+
+	return retval;
+}
+
+/* Release handler: drop the single-open reference taken in device_open. */
+static int device_release(struct inode *inp, struct file *filp)
+{
+	struct syna_tcm_hcd *tcm_hcd = device_hcd->tcm_hcd;
+
+	mutex_lock(&tcm_hcd->extif_mutex);
+
+	if (device_hcd->ref_count)
+		device_hcd->ref_count--;
+
+	mutex_unlock(&tcm_hcd->extif_mutex);
+
+	return 0;
+}
+
+/*
+ * devnode callback: place the node under /dev/<driver>/ with 0666 mode.
+ * NOTE(review): 0666 makes the node world-writable - confirm this is
+ * intended for production builds.
+ */
+static char *device_devnode(struct device *dev, umode_t *mode)
+{
+	if (!mode)
+		return NULL;
+
+	/* S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH; */
+	*mode = 0666;
+
+	return kasprintf(GFP_KERNEL, "%s/%s", PLATFORM_DRIVER_NAME,
+			dev_name(dev));
+}
+
+/*
+ * Create the device class (idempotent) and install the devnode callback
+ * that controls node path and permissions.
+ * Returns 0 on success or -ENODEV.
+ */
+static int device_create_class(void)
+{
+	struct syna_tcm_hcd *tcm_hcd = device_hcd->tcm_hcd;
+
+	if (device_hcd->class != NULL)
+		return 0;
+
+	device_hcd->class = class_create(THIS_MODULE, PLATFORM_DRIVER_NAME);
+
+	if (IS_ERR(device_hcd->class)) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to create class\n");
+		return -ENODEV;
+	}
+
+	device_hcd->class->devnode = device_devnode;
+
+	return 0;
+}
+
+/* File operations for the raw-access char device (/dev/.../tcm0). */
+static const struct file_operations device_fops = {
+	.owner = THIS_MODULE,
+#ifdef HAVE_UNLOCKED_IOCTL
+	.unlocked_ioctl = device_ioctl,
+#ifdef HAVE_COMPAT_IOCTL
+	.compat_ioctl = device_ioctl,
+#endif
+#else
+	.ioctl = device_ioctl,
+#endif
+	.llseek = device_llseek,
+	.read = device_read,
+	.write = device_write,
+	.open = device_open,
+	.release = device_release,
+};
+
+/*
+ * Bring up the raw char device interface: allocate the global
+ * device_hcd, register a char device region (reusing a previously
+ * assigned major when rmidev_major_num is set, otherwise letting the
+ * kernel allocate one), add the cdev, create the class/device node,
+ * and export the attention GPIO to sysfs.  On any failure the steps
+ * completed so far are unwound in reverse order and device_hcd is
+ * freed and reset to NULL.
+ */
+static int device_init(struct syna_tcm_hcd *tcm_hcd)
+{
+	int retval;
+	dev_t dev_num;
+	const struct syna_tcm_board_data *bdata = tcm_hcd->hw_if->bdata;
+
+	device_hcd = kzalloc(sizeof(*device_hcd), GFP_KERNEL);
+	if (!device_hcd) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to allocate memory for device_hcd\n");
+		return -ENOMEM;
+	}
+
+	device_hcd->tcm_hcd = tcm_hcd;
+
+	device_hcd->concurrent = CONCURRENT;
+
+	INIT_BUFFER(device_hcd->out, false);
+	INIT_BUFFER(device_hcd->resp, false);
+	INIT_BUFFER(device_hcd->report, false);
+
+	/* reuse an already-known major, else have the kernel pick one */
+	if (rmidev_major_num) {
+		dev_num = MKDEV(rmidev_major_num, 0);
+		retval = register_chrdev_region(dev_num, 1,
+				PLATFORM_DRIVER_NAME);
+		if (retval < 0) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+					"Failed to register char device\n");
+			goto err_register_chrdev_region;
+		}
+	} else {
+		retval = alloc_chrdev_region(&dev_num, 0, 1,
+				PLATFORM_DRIVER_NAME);
+		if (retval < 0) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+					"Failed to allocate char device\n");
+			goto err_alloc_chrdev_region;
+		}
+
+		/* remember the dynamic major for subsequent re-inits */
+		rmidev_major_num = MAJOR(dev_num);
+	}
+
+	device_hcd->dev_num = dev_num;
+
+	cdev_init(&device_hcd->char_dev, &device_fops);
+
+	retval = cdev_add(&device_hcd->char_dev, dev_num, 1);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to add char device\n");
+		goto err_add_chardev;
+	}
+
+	retval = device_create_class();
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to create class\n");
+		goto err_create_class;
+	}
+
+	device_hcd->device = device_create(device_hcd->class, NULL,
+			device_hcd->dev_num, NULL, CHAR_DEVICE_NAME"%d",
+			MINOR(device_hcd->dev_num));
+	if (IS_ERR(device_hcd->device)) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to create device\n");
+		retval = -ENODEV;
+		goto err_create_device;
+	}
+
+	/* best effort: expose the attention GPIO; failures are logged only */
+	if (bdata->irq_gpio >= 0) {
+		retval = gpio_export(bdata->irq_gpio, false);
+		if (retval < 0) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+					"Failed to export GPIO\n");
+		} else {
+			retval = gpio_export_link(&tcm_hcd->pdev->dev,
+					"attn", bdata->irq_gpio);
+			if (retval < 0)
+				LOGE(tcm_hcd->pdev->dev.parent,
+						"Failed to export GPIO link\n");
+		}
+	}
+
+	return 0;
+
+/* unwind in strict reverse order of the setup steps above */
+err_create_device:
+	class_destroy(device_hcd->class);
+
+err_create_class:
+	cdev_del(&device_hcd->char_dev);
+
+err_add_chardev:
+	unregister_chrdev_region(dev_num, 1);
+
+err_alloc_chrdev_region:
+err_register_chrdev_region:
+	RELEASE_BUFFER(device_hcd->report);
+	RELEASE_BUFFER(device_hcd->resp);
+	RELEASE_BUFFER(device_hcd->out);
+
+	kfree(device_hcd);
+	device_hcd = NULL;
+
+	return retval;
+}
+
+/*
+ * Tear down the char device interface in reverse order of device_init()
+ * and free device_hcd.  Safe to call when the interface was never
+ * created.  Always signals device_remove_complete so module exit can
+ * finish waiting.
+ */
+static int device_remove(struct syna_tcm_hcd *tcm_hcd)
+{
+	if (!device_hcd)
+		goto exit;
+
+	device_destroy(device_hcd->class, device_hcd->dev_num);
+
+	class_destroy(device_hcd->class);
+
+	cdev_del(&device_hcd->char_dev);
+
+	unregister_chrdev_region(device_hcd->dev_num, 1);
+
+	RELEASE_BUFFER(device_hcd->report);
+	RELEASE_BUFFER(device_hcd->resp);
+	RELEASE_BUFFER(device_hcd->out);
+
+	kfree(device_hcd);
+	device_hcd = NULL;
+
+exit:
+	complete(&device_remove_complete);
+
+	return 0;
+}
+
+/*
+ * Reset callback: re-create the char device interface if it is not
+ * currently up; otherwise nothing needs to be done.
+ */
+static int device_reset(struct syna_tcm_hcd *tcm_hcd)
+{
+	if (!device_hcd)
+		return device_init(tcm_hcd);
+
+	return 0;
+}
+
+/* Callback registration for the core driver's module framework. */
+static struct syna_tcm_module_cb device_module = {
+	.type = TCM_DEVICE,
+	.init = device_init,
+	.remove = device_remove,
+	.syncbox = NULL,
+	.asyncbox = NULL,
+	.reset = device_reset,
+	.suspend = NULL,
+	.resume = NULL,
+	.early_suspend = NULL,
+};
+
+/* Register this module's callbacks with the TCM core driver. */
+static int __init device_module_init(void)
+{
+	return syna_tcm_add_module(&device_module, true);
+}
+
+/* Unregister from the core and wait until device_remove() has run. */
+static void __exit device_module_exit(void)
+{
+	syna_tcm_add_module(&device_module, false);
+
+	wait_for_completion(&device_remove_complete);
+}
+
+module_init(device_module_init);
+module_exit(device_module_exit);
+
+MODULE_AUTHOR("Synaptics, Inc.");
+MODULE_DESCRIPTION("Synaptics TCM Device Module");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/touchscreen/synaptics_tcm/synaptics_tcm_diagnostics.c b/drivers/input/touchscreen/synaptics_tcm/synaptics_tcm_diagnostics.c
new file mode 100644
index 0000000..0f8570a
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_tcm/synaptics_tcm_diagnostics.c
@@ -0,0 +1,564 @@
+/*
+ * Synaptics TCM touchscreen driver
+ *
+ * Copyright (C) 2017-2019 Synaptics Incorporated. All rights reserved.
+ *
+ * Copyright (C) 2017-2019 Scott Lin <scott.lin@tw.synaptics.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * INFORMATION CONTAINED IN THIS DOCUMENT IS PROVIDED "AS-IS," AND SYNAPTICS
+ * EXPRESSLY DISCLAIMS ALL EXPRESS AND IMPLIED WARRANTIES, INCLUDING ANY
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE,
+ * AND ANY WARRANTIES OF NON-INFRINGEMENT OF ANY INTELLECTUAL PROPERTY RIGHTS.
+ * IN NO EVENT SHALL SYNAPTICS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, PUNITIVE, OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OF THE INFORMATION CONTAINED IN THIS DOCUMENT, HOWEVER CAUSED
+ * AND BASED ON ANY THEORY OF LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHER TORTIOUS ACTION, AND EVEN IF SYNAPTICS WAS ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE. IF A TRIBUNAL OF COMPETENT JURISDICTION DOES
+ * NOT PERMIT THE DISCLAIMER OF DIRECT DAMAGES OR ANY OTHER DAMAGES, SYNAPTICS'
+ * TOTAL CUMULATIVE LIABILITY TO ANY PARTY SHALL NOT EXCEED ONE HUNDRED U.S.
+ * DOLLARS.
+ */
+
+#include <linux/sched/signal.h>
+#include "synaptics_tcm_core.h"
+
+#define SYSFS_DIR_NAME "diagnostics"
+
+/* Identifies which of the two report buffers holds the newest data. */
+enum pingpong_state {
+	PING = 0,
+	PONG = 1,
+};
+
+/* Runtime state for the diagnostics module (single global instance). */
+struct diag_hcd {
+	pid_t pid;			/* PID to signal on new data; 0 disables */
+	unsigned char report_type;	/* report id captured for sysfs */
+	enum pingpong_state state;	/* buffer readers should consume */
+	struct kobject *sysfs_dir;	/* "diagnostics" sysfs directory */
+	struct siginfo sigio;		/* prebuilt SIGIO siginfo */
+	struct task_struct *task;	/* task resolved from pid (not refcounted) */
+	struct syna_tcm_buffer ping;	/* report buffer A */
+	struct syna_tcm_buffer pong;	/* report buffer B */
+	struct syna_tcm_hcd *tcm_hcd;	/* core driver handle */
+};
+
+DECLARE_COMPLETION(diag_remove_complete);
+
+static struct diag_hcd *diag_hcd;
+
+STORE_PROTOTYPE(diag, pid);
+SHOW_PROTOTYPE(diag, size);
+STORE_PROTOTYPE(diag, type);
+SHOW_PROTOTYPE(diag, rows);
+SHOW_PROTOTYPE(diag, cols);
+SHOW_PROTOTYPE(diag, hybrid);
+SHOW_PROTOTYPE(diag, buttons);
+
+/* Plain-text sysfs attributes created under the diagnostics directory. */
+static struct device_attribute *attrs[] = {
+	ATTRIFY(pid),
+	ATTRIFY(size),
+	ATTRIFY(type),
+	ATTRIFY(rows),
+	ATTRIFY(cols),
+	ATTRIFY(hybrid),
+	ATTRIFY(buttons),
+};
+
+static ssize_t diag_sysfs_data_show(struct file *data_file,
+		struct kobject *kobj, struct bin_attribute *attributes,
+		char *buf, loff_t pos, size_t count);
+
+/* Read-only binary sysfs file exposing the latest captured report. */
+static struct bin_attribute bin_attr = {
+	.attr = {
+		.name = "data",
+		.mode = 0444,
+	},
+	.size = 0,	/* size 0 = unlimited reads; length enforced in read() */
+	.read = diag_sysfs_data_show,
+};
+
+/*
+ * Sysfs "pid" store: set the PID that receives SIGIO whenever a
+ * matching diagnostic report arrives; writing 0 disables signaling.
+ *
+ * Fix: when signaling is disabled (pid == 0), the previously resolved
+ * task pointer is now cleared instead of being left stale.
+ *
+ * NOTE(review): diag_hcd->task is looked up without taking a reference
+ * (no get_task_struct); if the target exits, a later send_sig_info()
+ * would use a dangling pointer — confirm against core driver lifetime
+ * guarantees.
+ */
+static ssize_t diag_sysfs_pid_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned int input;
+	struct syna_tcm_hcd *tcm_hcd = diag_hcd->tcm_hcd;
+
+	if (kstrtouint(buf, 10, &input))
+		return -EINVAL;
+
+	mutex_lock(&tcm_hcd->extif_mutex);
+
+	diag_hcd->pid = input;
+
+	if (diag_hcd->pid) {
+		diag_hcd->task = pid_task(find_vpid(diag_hcd->pid),
+				PIDTYPE_PID);
+		if (!diag_hcd->task) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+					"Failed to locate task\n");
+			retval = -EINVAL;
+			goto exit;
+		}
+	} else {
+		/* drop the stale task pointer when signaling is disabled */
+		diag_hcd->task = NULL;
+	}
+
+	retval = count;
+
+exit:
+	mutex_unlock(&tcm_hcd->extif_mutex);
+
+	return retval;
+}
+
+/*
+ * Sysfs "size" show: report the byte length of the most recently
+ * captured report, taken from whichever ping/pong buffer is current.
+ */
+static ssize_t diag_sysfs_size_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int retval;
+	struct syna_tcm_hcd *tcm_hcd = diag_hcd->tcm_hcd;
+
+	mutex_lock(&tcm_hcd->extif_mutex);
+
+	if (diag_hcd->state == PING) {
+		LOCK_BUFFER(diag_hcd->ping);
+
+		retval = snprintf(buf, PAGE_SIZE,
+				"%u\n",
+				diag_hcd->ping.data_length);
+
+		UNLOCK_BUFFER(diag_hcd->ping);
+	} else {
+		LOCK_BUFFER(diag_hcd->pong);
+
+		retval = snprintf(buf, PAGE_SIZE,
+				"%u\n",
+				diag_hcd->pong.data_length);
+
+		UNLOCK_BUFFER(diag_hcd->pong);
+	}
+
+	mutex_unlock(&tcm_hcd->extif_mutex);
+
+	return retval;
+}
+
+/*
+ * Sysfs "type" store: select which report id the diagnostics module
+ * captures (value is truncated to one byte).
+ */
+static ssize_t diag_sysfs_type_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	unsigned int value;
+	struct syna_tcm_hcd *tcm_hcd = diag_hcd->tcm_hcd;
+
+	if (kstrtouint(buf, 10, &value))
+		return -EINVAL;
+
+	mutex_lock(&tcm_hcd->extif_mutex);
+	diag_hcd->report_type = (unsigned char)value;
+	mutex_unlock(&tcm_hcd->extif_mutex);
+
+	return count;
+}
+
+/*
+ * Sysfs "rows" show: number of image rows from the firmware app info
+ * block.  Fails with -ENODEV unless the firmware is in application
+ * mode with a healthy app status.
+ */
+static ssize_t diag_sysfs_rows_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int retval;
+	unsigned int rows;
+	struct syna_tcm_app_info *app_info;
+	struct syna_tcm_hcd *tcm_hcd = diag_hcd->tcm_hcd;
+
+	mutex_lock(&tcm_hcd->extif_mutex);
+
+	if (tcm_hcd->id_info.mode != MODE_APPLICATION ||
+			tcm_hcd->app_status != APP_STATUS_OK) {
+		retval = -ENODEV;
+		goto exit;
+	}
+
+	app_info = &tcm_hcd->app_info;
+	/* value is stored little-endian in the app info block */
+	rows = le2_to_uint(app_info->num_of_image_rows);
+
+	retval = snprintf(buf, PAGE_SIZE, "%u\n", rows);
+
+exit:
+	mutex_unlock(&tcm_hcd->extif_mutex);
+
+	return retval;
+}
+
+/*
+ * Sysfs "cols" show: number of image columns from the firmware app
+ * info block.  Requires application mode, like rows_show above.
+ */
+static ssize_t diag_sysfs_cols_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int retval;
+	unsigned int cols;
+	struct syna_tcm_app_info *app_info;
+	struct syna_tcm_hcd *tcm_hcd = diag_hcd->tcm_hcd;
+
+	mutex_lock(&tcm_hcd->extif_mutex);
+
+	if (tcm_hcd->id_info.mode != MODE_APPLICATION ||
+			tcm_hcd->app_status != APP_STATUS_OK) {
+		retval = -ENODEV;
+		goto exit;
+	}
+
+	app_info = &tcm_hcd->app_info;
+	cols = le2_to_uint(app_info->num_of_image_cols);
+
+	retval = snprintf(buf, PAGE_SIZE, "%u\n", cols);
+
+exit:
+	mutex_unlock(&tcm_hcd->extif_mutex);
+
+	return retval;
+}
+
+/*
+ * Sysfs "hybrid" show: whether the firmware reports hybrid data,
+ * read from the app info block.  Requires application mode.
+ */
+static ssize_t diag_sysfs_hybrid_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int retval;
+	unsigned int hybrid;
+	struct syna_tcm_app_info *app_info;
+	struct syna_tcm_hcd *tcm_hcd = diag_hcd->tcm_hcd;
+
+	mutex_lock(&tcm_hcd->extif_mutex);
+
+	if (tcm_hcd->id_info.mode != MODE_APPLICATION ||
+			tcm_hcd->app_status != APP_STATUS_OK) {
+		retval = -ENODEV;
+		goto exit;
+	}
+
+	app_info = &tcm_hcd->app_info;
+	hybrid = le2_to_uint(app_info->has_hybrid_data);
+
+	retval = snprintf(buf, PAGE_SIZE, "%u\n", hybrid);
+
+exit:
+	mutex_unlock(&tcm_hcd->extif_mutex);
+
+	return retval;
+}
+
+/*
+ * Sysfs "buttons" show: number of buttons from the firmware app info
+ * block.  Requires application mode.
+ */
+static ssize_t diag_sysfs_buttons_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int retval;
+	unsigned int buttons;
+	struct syna_tcm_app_info *app_info;
+	struct syna_tcm_hcd *tcm_hcd = diag_hcd->tcm_hcd;
+
+	mutex_lock(&tcm_hcd->extif_mutex);
+
+	if (tcm_hcd->id_info.mode != MODE_APPLICATION ||
+			tcm_hcd->app_status != APP_STATUS_OK) {
+		retval = -ENODEV;
+		goto exit;
+	}
+
+	app_info = &tcm_hcd->app_info;
+	buttons = le2_to_uint(app_info->num_of_buttons);
+
+	retval = snprintf(buf, PAGE_SIZE, "%u\n", buttons);
+
+exit:
+	mutex_unlock(&tcm_hcd->extif_mutex);
+
+	return retval;
+}
+
+/*
+ * Sysfs bin-file read handler: copy up to @count bytes starting at
+ * @pos from whichever ping/pong buffer diag_report() filled last.
+ *
+ * Fixes: the original jumped to "exit" with the ping/pong buffer still
+ * locked whenever it held no data, leaving the buffer mutex held
+ * forever and deadlocking the next report capture.  It also computed
+ * data_length - pos without checking pos, which underflows for reads
+ * past the end of the data.  Both cases now unlock and return 0 bytes.
+ */
+static ssize_t diag_sysfs_data_show(struct file *data_file,
+		struct kobject *kobj, struct bin_attribute *attributes,
+		char *buf, loff_t pos, size_t count)
+{
+	int retval;
+	unsigned int readlen;
+	struct syna_tcm_hcd *tcm_hcd = diag_hcd->tcm_hcd;
+
+	mutex_lock(&tcm_hcd->extif_mutex);
+
+	retval = 0;
+	readlen = 0;
+
+	if (diag_hcd->state == PING) {
+		LOCK_BUFFER(diag_hcd->ping);
+
+		/* empty buffer or read past end: nothing to copy */
+		if (diag_hcd->ping.data_length == 0 ||
+				pos >= diag_hcd->ping.data_length) {
+			UNLOCK_BUFFER(diag_hcd->ping);
+			goto exit;
+		}
+
+		readlen = MIN(count, diag_hcd->ping.data_length - pos);
+
+		retval = secure_memcpy(buf,
+				count,
+				&diag_hcd->ping.buf[pos],
+				diag_hcd->ping.buf_size - pos,
+				readlen);
+
+		UNLOCK_BUFFER(diag_hcd->ping);
+	} else {
+		LOCK_BUFFER(diag_hcd->pong);
+
+		if (diag_hcd->pong.data_length == 0 ||
+				pos >= diag_hcd->pong.data_length) {
+			UNLOCK_BUFFER(diag_hcd->pong);
+			goto exit;
+		}
+
+		readlen = MIN(count, diag_hcd->pong.data_length - pos);
+
+		retval = secure_memcpy(buf,
+				count,
+				&diag_hcd->pong.buf[pos],
+				diag_hcd->pong.buf_size - pos,
+				readlen);
+
+		UNLOCK_BUFFER(diag_hcd->pong);
+	}
+
+exit:
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to copy report data\n");
+	} else {
+		retval = readlen;
+	}
+
+	mutex_unlock(&tcm_hcd->extif_mutex);
+
+	return retval;
+}
+
+/*
+ * Capture the report frame currently in tcm_hcd->report.buffer into the
+ * inactive ping/pong buffer, then publish it by updating
+ * diag_hcd->state.  The function-local static "state" tracks which
+ * buffer is filled next, so readers always consume the buffer named by
+ * diag_hcd->state while the other one is being written.  Finally,
+ * signal the registered userspace task (if any) with SIGIO.
+ */
+static void diag_report(void)
+{
+	int retval;
+	static enum pingpong_state state = PING;
+	struct syna_tcm_hcd *tcm_hcd = diag_hcd->tcm_hcd;
+
+	if (state == PING) {
+		LOCK_BUFFER(diag_hcd->ping);
+
+		retval = syna_tcm_alloc_mem(tcm_hcd,
+				&diag_hcd->ping,
+				tcm_hcd->report.buffer.data_length);
+		if (retval < 0) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to allocate memory for ping.buf\n");
+			UNLOCK_BUFFER(diag_hcd->ping);
+			return;
+		}
+
+		retval = secure_memcpy(diag_hcd->ping.buf,
+				diag_hcd->ping.buf_size,
+				tcm_hcd->report.buffer.buf,
+				tcm_hcd->report.buffer.buf_size,
+				tcm_hcd->report.buffer.data_length);
+		if (retval < 0) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+					"Failed to copy report data\n");
+			UNLOCK_BUFFER(diag_hcd->ping);
+			return;
+		}
+
+		diag_hcd->ping.data_length = tcm_hcd->report.buffer.data_length;
+
+		UNLOCK_BUFFER(diag_hcd->ping);
+
+		/* publish ping to readers, fill pong next time */
+		diag_hcd->state = state;
+		state = PONG;
+	} else {
+		LOCK_BUFFER(diag_hcd->pong);
+
+		retval = syna_tcm_alloc_mem(tcm_hcd,
+				&diag_hcd->pong,
+				tcm_hcd->report.buffer.data_length);
+		if (retval < 0) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to allocate memory for pong.buf\n");
+			UNLOCK_BUFFER(diag_hcd->pong);
+			return;
+		}
+
+		retval = secure_memcpy(diag_hcd->pong.buf,
+				diag_hcd->pong.buf_size,
+				tcm_hcd->report.buffer.buf,
+				tcm_hcd->report.buffer.buf_size,
+				tcm_hcd->report.buffer.data_length);
+		if (retval < 0) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+					"Failed to copy report data\n");
+			UNLOCK_BUFFER(diag_hcd->pong);
+			return;
+		}
+
+		diag_hcd->pong.data_length = tcm_hcd->report.buffer.data_length;
+
+		UNLOCK_BUFFER(diag_hcd->pong);
+
+		/* publish pong to readers, fill ping next time */
+		diag_hcd->state = state;
+		state = PING;
+	}
+
+	/* NOTE(review): task is not reference-counted; see pid_store */
+	if (diag_hcd->pid)
+		send_sig_info(SIGIO, &diag_hcd->sigio, diag_hcd->task);
+}
+
+/*
+ * Bring up the diagnostics module: allocate the global diag_hcd,
+ * prebuild the SIGIO siginfo, create the "diagnostics" sysfs directory
+ * plus its text attributes and the binary "data" file.  On failure the
+ * steps completed so far are unwound and diag_hcd is reset to NULL.
+ */
+static int diag_init(struct syna_tcm_hcd *tcm_hcd)
+{
+	int retval;
+	int idx;
+
+	diag_hcd = kzalloc(sizeof(*diag_hcd), GFP_KERNEL);
+	if (!diag_hcd) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to allocate memory for diag_hcd\n");
+		return -ENOMEM;
+	}
+
+	diag_hcd->tcm_hcd = tcm_hcd;
+	diag_hcd->state = PING;
+
+	INIT_BUFFER(diag_hcd->ping, false);
+	INIT_BUFFER(diag_hcd->pong, false);
+
+	/* prebuild the siginfo sent from diag_report() */
+	memset(&diag_hcd->sigio, 0x00, sizeof(diag_hcd->sigio));
+	diag_hcd->sigio.si_signo = SIGIO;
+	diag_hcd->sigio.si_code = SI_USER;
+
+	diag_hcd->sysfs_dir = kobject_create_and_add(SYSFS_DIR_NAME,
+			tcm_hcd->sysfs_dir);
+	if (!diag_hcd->sysfs_dir) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to create sysfs directory\n");
+		retval = -EINVAL;
+		goto err_sysfs_create_dir;
+	}
+
+	for (idx = 0; idx < ARRAY_SIZE(attrs); idx++) {
+		retval = sysfs_create_file(diag_hcd->sysfs_dir,
+				&(*attrs[idx]).attr);
+		if (retval < 0) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+					"Failed to create sysfs file\n");
+			goto err_sysfs_create_file;
+		}
+	}
+
+	retval = sysfs_create_bin_file(diag_hcd->sysfs_dir, &bin_attr);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to create sysfs bin file\n");
+		goto err_sysfs_create_bin_file;
+	}
+
+	return 0;
+
+/* remove only the attrs created so far (idx is one past the failure) */
+err_sysfs_create_bin_file:
+err_sysfs_create_file:
+	for (idx--; idx >= 0; idx--)
+		sysfs_remove_file(diag_hcd->sysfs_dir, &(*attrs[idx]).attr);
+
+	kobject_put(diag_hcd->sysfs_dir);
+
+err_sysfs_create_dir:
+	RELEASE_BUFFER(diag_hcd->pong);
+	RELEASE_BUFFER(diag_hcd->ping);
+
+	kfree(diag_hcd);
+	diag_hcd = NULL;
+
+	return retval;
+}
+
+/*
+ * Tear down the diagnostics sysfs interface and free diag_hcd.  Safe
+ * to call when init never ran.  Always signals diag_remove_complete
+ * so module exit can finish waiting.
+ */
+static int diag_remove(struct syna_tcm_hcd *tcm_hcd)
+{
+	int idx;
+
+	if (!diag_hcd)
+		goto exit;
+
+	sysfs_remove_bin_file(diag_hcd->sysfs_dir, &bin_attr);
+
+	for (idx = 0; idx < ARRAY_SIZE(attrs); idx++)
+		sysfs_remove_file(diag_hcd->sysfs_dir, &(*attrs[idx]).attr);
+
+	kobject_put(diag_hcd->sysfs_dir);
+
+	RELEASE_BUFFER(diag_hcd->pong);
+	RELEASE_BUFFER(diag_hcd->ping);
+
+	kfree(diag_hcd);
+	diag_hcd = NULL;
+
+exit:
+	complete(&diag_remove_complete);
+
+	return 0;
+}
+
+/*
+ * Syncbox callback: capture the incoming report frame when its id
+ * matches the type selected through sysfs.
+ */
+static int diag_syncbox(struct syna_tcm_hcd *tcm_hcd)
+{
+	if (diag_hcd && tcm_hcd->report.id == diag_hcd->report_type)
+		diag_report();
+
+	return 0;
+}
+
+/*
+ * Reset callback: re-create the diagnostics interface if it is not
+ * currently up; otherwise nothing needs to be done.
+ */
+static int diag_reset(struct syna_tcm_hcd *tcm_hcd)
+{
+	if (!diag_hcd)
+		return diag_init(tcm_hcd);
+
+	return 0;
+}
+
+/* Callback registration for the core driver's module framework. */
+static struct syna_tcm_module_cb diag_module = {
+	.type = TCM_DIAGNOSTICS,
+	.init = diag_init,
+	.remove = diag_remove,
+	.syncbox = diag_syncbox,
+	.asyncbox = NULL,
+	.reset = diag_reset,
+	.suspend = NULL,
+	.resume = NULL,
+	.early_suspend = NULL,
+};
+
+/* Register this module's callbacks with the TCM core driver. */
+static int __init diag_module_init(void)
+{
+	return syna_tcm_add_module(&diag_module, true);
+}
+
+/* Unregister from the core and wait until diag_remove() has run. */
+static void __exit diag_module_exit(void)
+{
+	syna_tcm_add_module(&diag_module, false);
+
+	wait_for_completion(&diag_remove_complete);
+}
+
+module_init(diag_module_init);
+module_exit(diag_module_exit);
+
+MODULE_AUTHOR("Synaptics, Inc.");
+MODULE_DESCRIPTION("Synaptics TCM Diagnostics Module");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/touchscreen/synaptics_tcm/synaptics_tcm_i2c.c b/drivers/input/touchscreen/synaptics_tcm/synaptics_tcm_i2c.c
new file mode 100644
index 0000000..88af3f8
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_tcm/synaptics_tcm_i2c.c
@@ -0,0 +1,519 @@
+/*
+ * Synaptics TCM touchscreen driver
+ *
+ * Copyright (C) 2017-2019 Synaptics Incorporated. All rights reserved.
+ *
+ * Copyright (C) 2017-2019 Scott Lin <scott.lin@tw.synaptics.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * INFORMATION CONTAINED IN THIS DOCUMENT IS PROVIDED "AS-IS," AND SYNAPTICS
+ * EXPRESSLY DISCLAIMS ALL EXPRESS AND IMPLIED WARRANTIES, INCLUDING ANY
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE,
+ * AND ANY WARRANTIES OF NON-INFRINGEMENT OF ANY INTELLECTUAL PROPERTY RIGHTS.
+ * IN NO EVENT SHALL SYNAPTICS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, PUNITIVE, OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OF THE INFORMATION CONTAINED IN THIS DOCUMENT, HOWEVER CAUSED
+ * AND BASED ON ANY THEORY OF LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHER TORTIOUS ACTION, AND EVEN IF SYNAPTICS WAS ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE. IF A TRIBUNAL OF COMPETENT JURISDICTION DOES
+ * NOT PERMIT THE DISCLAIMER OF DIRECT DAMAGES OR ANY OTHER DAMAGES, SYNAPTICS'
+ * TOTAL CUMULATIVE LIABILITY TO ANY PARTY SHALL NOT EXCEED ONE HUNDRED U.S.
+ * DOLLARS.
+ */
+
+#include <linux/i2c.h>
+#include <linux/of_gpio.h>
+#include "synaptics_tcm_core.h"
+#include "linux/moduleparam.h"
+
+#define XFER_ATTEMPTS 10
+
+/* shared bounce buffer for RMI writes; grows on demand, never shrinks */
+static unsigned char *buf;
+
+/* current allocated size of buf in bytes (0 = unallocated) */
+static unsigned int buf_size;
+
+static struct syna_tcm_bus_io bus_io;
+
+static struct syna_tcm_hw_interface hw_if;
+
+static struct platform_device *syna_tcm_i2c_device;
+
+/* DRM panel matched during probe; NULL until syna_tcm_check_dt() succeeds */
+static struct drm_panel *active_tcm_panel;
+
+/* Return the DRM panel matched during probe (NULL if none yet). */
+struct drm_panel *tcm_get_panel(void)
+{
+	return active_tcm_panel;
+}
+
+
+#ifdef CONFIG_OF
+/*
+ * Parse the device-tree node into @bdata.  GPIOs, power/reset timing,
+ * axis flips and the bootloader I2C address are read; the power GPIO
+ * and the string properties are optional (errors ignored).
+ *
+ * Fix: the reset-gpio failure path logged the copy-pasted message
+ * "Error getting irq_gpio"; it now names reset_gpio.
+ */
+static int parse_dt(struct device *dev, struct syna_tcm_board_data *bdata)
+{
+	int retval;
+	struct device_node *np = dev->of_node;
+
+	retval = of_get_named_gpio_flags(np,
+			"synaptics,irq-gpio", 0,
+			(enum of_gpio_flags *)&bdata->irq_flags);
+	if (!gpio_is_valid(retval)) {
+		if (retval != -EPROBE_DEFER)
+			dev_err(dev, "Error getting irq_gpio\n");
+		return retval;
+	}
+	bdata->irq_gpio = retval;
+
+	/* optional properties: failures leave defaults in place */
+	of_property_read_u32(np, "synaptics,irq-on-state",
+			&bdata->irq_on_state);
+	of_property_read_string(np, "synaptics,pwr-reg-name",
+			&bdata->pwr_reg_name);
+	of_property_read_string(np, "synaptics,bus-reg-name",
+			&bdata->bus_reg_name);
+	of_property_read_string(np, "synaptics,firmware-name",
+			&bdata->fw_name);
+
+	/* power GPIO is optional; validity is checked where it is used */
+	bdata->power_gpio = of_get_named_gpio_flags(np,
+			"synaptics,power-gpio", 0, NULL);
+
+	retval = of_property_read_u32(np, "synaptics,power-on-state",
+			&bdata->power_on_state);
+	if (retval < 0) {
+		LOGE(dev, "Failed to read synaptics,power-on-state\n");
+		return retval;
+	}
+
+	retval = of_property_read_u32(np, "synaptics,power-delay-ms",
+			&bdata->power_delay_ms);
+	if (retval < 0) {
+		LOGE(dev, "Failed to read synaptics,power-delay-ms\n");
+		return retval;
+	}
+
+	retval = of_get_named_gpio_flags(np,
+			"synaptics,reset-gpio", 0, NULL);
+	if (!gpio_is_valid(retval)) {
+		if (retval != -EPROBE_DEFER)
+			dev_err(dev, "Error getting reset_gpio\n");
+		return retval;
+	}
+	bdata->reset_gpio = retval;
+
+	retval = of_property_read_u32(np, "synaptics,reset-on-state",
+			&bdata->reset_on_state);
+	if (retval < 0) {
+		LOGE(dev, "Failed to read synaptics,reset-on-state\n");
+		return retval;
+	}
+
+	retval = of_property_read_u32(np, "synaptics,reset-active-ms",
+			&bdata->reset_active_ms);
+	if (retval < 0) {
+		LOGE(dev, "Failed to read synaptics,reset-active-ms\n");
+		return retval;
+	}
+
+	retval = of_property_read_u32(np, "synaptics,reset-delay-ms",
+			&bdata->reset_delay_ms);
+	if (retval < 0) {
+		LOGE(dev, "Unable to read synaptics,reset-delay-ms\n");
+		return retval;
+	}
+
+	bdata->x_flip = of_property_read_bool(np, "synaptics,x-flip");
+	bdata->y_flip = of_property_read_bool(np, "synaptics,y-flip");
+	bdata->swap_axes = of_property_read_bool(np, "synaptics,swap-axes");
+
+	retval = of_property_read_u32(np, "synaptics,ubl-i2c-addr",
+			&bdata->ubl_i2c_addr);
+	if (retval < 0) {
+		LOGE(dev, "Unable to read synaptics,ubl-i2c-addr\n");
+		return retval;
+	}
+
+	bdata->extend_report = of_property_read_bool(np,
+			"synaptics,extend_report");
+
+	return 0;
+}
+#endif
+
+/*
+ * Ensure the shared bounce buffer can hold @size bytes, growing it if
+ * necessary (it is never shrunk).  Callers serialize access through
+ * tcm_hcd->io_ctrl_mutex.  Returns 0 on success or -ENOMEM; on failure
+ * the old buffer has already been released and buf_size reset to 0.
+ *
+ * Cleanup: kfree(NULL) is a no-op, so the "if (buf_size)" guard around
+ * kfree() was redundant and has been dropped.
+ */
+static int syna_tcm_i2c_alloc_mem(struct syna_tcm_hcd *tcm_hcd,
+		unsigned int size)
+{
+	struct i2c_client *i2c = to_i2c_client(tcm_hcd->pdev->dev.parent);
+
+	if (size > buf_size) {
+		kfree(buf);
+		buf = kmalloc(size, GFP_KERNEL);
+		if (!buf) {
+			LOGE(&i2c->dev,
+					"Failed to allocate memory for buf\n");
+			buf_size = 0;
+			return -ENOMEM;
+		}
+		buf_size = size;
+	}
+
+	return 0;
+}
+
+/*
+ * Read @length bytes starting at 8-bit register @addr from the device's
+ * RMI/bootloader I2C address (bdata->ubl_i2c_addr), using a combined
+ * write-address/read transfer.  Retries up to XFER_ATTEMPTS times with
+ * a 20 ms pause between attempts.  Returns @length on success or -EIO.
+ */
+static int syna_tcm_i2c_rmi_read(struct syna_tcm_hcd *tcm_hcd,
+		unsigned short addr, unsigned char *data, unsigned int length)
+{
+	int retval;
+	unsigned char address;
+	unsigned int attempt;
+	struct i2c_msg msg[2];
+	struct i2c_client *i2c = to_i2c_client(tcm_hcd->pdev->dev.parent);
+	const struct syna_tcm_board_data *bdata = tcm_hcd->hw_if->bdata;
+
+	mutex_lock(&tcm_hcd->io_ctrl_mutex);
+
+	/* only the low byte of addr is used as the register address */
+	address = (unsigned char)addr;
+
+	msg[0].addr = bdata->ubl_i2c_addr;
+	msg[0].flags = 0;
+	msg[0].len = 1;
+	msg[0].buf = &address;
+
+	msg[1].addr = bdata->ubl_i2c_addr;
+	msg[1].flags = I2C_M_RD;
+	msg[1].len = length;
+	msg[1].buf = data;
+
+	/* retval is always assigned: success exits early, the final
+	 * failed attempt sets -EIO (XFER_ATTEMPTS > 0) */
+	for (attempt = 0; attempt < XFER_ATTEMPTS; attempt++) {
+		if (i2c_transfer(i2c->adapter, msg, 2) == 2) {
+			retval = length;
+			goto exit;
+		}
+
+		LOGD(&i2c->dev, "Transfer attempt %d times\n", attempt + 1);
+
+		if (attempt + 1 == XFER_ATTEMPTS) {
+			LOGE(&i2c->dev, "Transfer failed\n");
+			retval = -EIO;
+			goto exit;
+		}
+
+		msleep(20);
+	}
+
+exit:
+	mutex_unlock(&tcm_hcd->io_ctrl_mutex);
+
+	return retval;
+}
+
+/*
+ * Write @length bytes to 8-bit register @addr at the device's
+ * RMI/bootloader I2C address.  The register address byte is prepended
+ * to the payload in the shared bounce buffer (grown on demand), then
+ * the combined buffer is sent in one transfer, retried up to
+ * XFER_ATTEMPTS times.  Returns @length on success or a negative errno.
+ */
+static int syna_tcm_i2c_rmi_write(struct syna_tcm_hcd *tcm_hcd,
+		unsigned short addr, unsigned char *data, unsigned int length)
+{
+	int retval;
+	unsigned int attempt;
+	unsigned int byte_count;
+	struct i2c_msg msg;
+	struct i2c_client *i2c = to_i2c_client(tcm_hcd->pdev->dev.parent);
+	const struct syna_tcm_board_data *bdata = tcm_hcd->hw_if->bdata;
+
+	mutex_lock(&tcm_hcd->io_ctrl_mutex);
+
+	/* one extra byte for the register address prefix */
+	byte_count = length + 1;
+
+	retval = syna_tcm_i2c_alloc_mem(tcm_hcd, byte_count);
+	if (retval < 0) {
+		LOGE(&i2c->dev,
+				"Failed to allocate memory\n");
+		goto exit;
+	}
+
+	buf[0] = (unsigned char)addr;
+	retval = secure_memcpy(&buf[1],
+			buf_size - 1,
+			data,
+			length,
+			length);
+	if (retval < 0) {
+		LOGE(&i2c->dev,
+				"Failed to copy write data\n");
+		goto exit;
+	}
+
+	msg.addr = bdata->ubl_i2c_addr;
+	msg.flags = 0;
+	msg.len = byte_count;
+	msg.buf = buf;
+
+	for (attempt = 0; attempt < XFER_ATTEMPTS; attempt++) {
+		if (i2c_transfer(i2c->adapter, &msg, 1) == 1) {
+			retval = length;
+			goto exit;
+		}
+
+		LOGD(&i2c->dev, "Transfer attempt %d times\n", attempt + 1);
+
+		if (attempt + 1 == XFER_ATTEMPTS) {
+			LOGE(&i2c->dev, "Transfer failed\n");
+			retval = -EIO;
+			goto exit;
+		}
+
+		msleep(20);
+	}
+
+exit:
+	mutex_unlock(&tcm_hcd->io_ctrl_mutex);
+
+	return retval;
+}
+
+/*
+ * Raw TCM read: fetch @length bytes from the device's primary I2C
+ * address, retrying up to XFER_ATTEMPTS times with a 20 ms pause.
+ * Returns @length on success or -EIO.
+ */
+static int syna_tcm_i2c_read(struct syna_tcm_hcd *tcm_hcd, unsigned char *data,
+		unsigned int length)
+{
+	int retval;
+	unsigned int attempt;
+	struct i2c_msg msg;
+	struct i2c_client *i2c = to_i2c_client(tcm_hcd->pdev->dev.parent);
+
+	mutex_lock(&tcm_hcd->io_ctrl_mutex);
+
+	msg.addr = i2c->addr;
+	msg.flags = I2C_M_RD;
+	msg.len = length;
+	msg.buf = data;
+
+	for (attempt = 0; attempt < XFER_ATTEMPTS; attempt++) {
+		if (i2c_transfer(i2c->adapter, &msg, 1) == 1) {
+			retval = length;
+			goto exit;
+		}
+
+		LOGD(&i2c->dev, "Transfer attempt %d times\n", attempt + 1);
+
+		if (attempt + 1 == XFER_ATTEMPTS) {
+			LOGE(&i2c->dev, "Transfer failed\n");
+			retval = -EIO;
+			goto exit;
+		}
+
+		msleep(20);
+	}
+
+exit:
+	mutex_unlock(&tcm_hcd->io_ctrl_mutex);
+
+	return retval;
+}
+
+/*
+ * Raw TCM write: send @length bytes to the device's primary I2C
+ * address, retrying up to XFER_ATTEMPTS times with a 20 ms pause.
+ * Returns @length on success or -EIO.
+ */
+static int syna_tcm_i2c_write(struct syna_tcm_hcd *tcm_hcd, unsigned char *data,
+		unsigned int length)
+{
+	int retval;
+	unsigned int attempt;
+	struct i2c_msg msg;
+	struct i2c_client *i2c = to_i2c_client(tcm_hcd->pdev->dev.parent);
+
+	mutex_lock(&tcm_hcd->io_ctrl_mutex);
+
+	msg.addr = i2c->addr;
+	msg.flags = 0;
+	msg.len = length;
+	msg.buf = data;
+
+	for (attempt = 0; attempt < XFER_ATTEMPTS; attempt++) {
+		if (i2c_transfer(i2c->adapter, &msg, 1) == 1) {
+			retval = length;
+			goto exit;
+		}
+
+		LOGD(&i2c->dev, "Transfer attempt %d times\n", attempt + 1);
+
+		if (attempt + 1 == XFER_ATTEMPTS) {
+			LOGE(&i2c->dev, "Transfer failed\n");
+			retval = -EIO;
+			goto exit;
+		}
+
+		msleep(20);
+	}
+
+exit:
+	mutex_unlock(&tcm_hcd->io_ctrl_mutex);
+
+	return retval;
+}
+
+/*
+ * Scan the node's "panel" phandle list and remember the first entry
+ * that resolves to a registered DRM panel.  Returns 0 when a panel is
+ * found or when no "panel" property exists, -ENODEV otherwise.
+ */
+static int syna_tcm_check_dt(struct device_node *np)
+{
+	struct device_node *node;
+	struct drm_panel *panel;
+	int count;
+	int i;
+
+	count = of_count_phandle_with_args(np, "panel", NULL);
+	if (count <= 0)
+		return 0;
+
+	for (i = 0; i < count; i++) {
+		node = of_parse_phandle(np, "panel", i);
+		panel = of_drm_find_panel(node);
+		of_node_put(node);
+		if (!IS_ERR(panel)) {
+			active_tcm_panel = panel;
+			return 0;
+		}
+	}
+
+	return -ENODEV;
+}
+
+/*
+ * Check whether this node's "compatible" string appears in the parent
+ * node's @prop (e.g. "qcom,i2c-touch-active") property.  Returns 0 on
+ * a match, -ENODEV otherwise.
+ */
+static int syna_tcm_check_default_tp(struct device_node *dt, const char *prop)
+{
+	const char *active_tp;
+	const char *compatible;
+	char *start;
+	int ret;
+
+	ret = of_property_read_string(dt->parent, prop, &active_tp);
+	if (ret) {
+		pr_err(" %s:fail to read %s %d\n", __func__, prop, ret);
+		return -ENODEV;
+	}
+
+	ret = of_property_read_string(dt, "compatible", &compatible);
+	if (ret < 0) {
+		pr_err(" %s:fail to read %s %d\n", __func__, "compatible", ret);
+		return -ENODEV;
+	}
+
+	/* substring match of compatible within the active-tp string */
+	start = strnstr(active_tp, compatible, strlen(active_tp));
+	if (start == NULL) {
+		pr_err(" %s:no match compatible, %s, %s\n",
+			__func__, compatible, active_tp);
+		ret = -ENODEV;
+	}
+
+	return ret;
+}
+
+/*
+ * I2C probe: verify this is the active touch panel, build the board
+ * data and bus ops, then register the core platform device that the
+ * rest of the driver binds to.
+ *
+ * Fix: the platform device allocated by platform_device_alloc() was
+ * leaked on every subsequent error path (board-data allocation, DT
+ * parse, platform_device_add); those paths now drop the reference via
+ * platform_device_put().
+ */
+static int syna_tcm_i2c_probe(struct i2c_client *i2c,
+		const struct i2c_device_id *dev_id)
+{
+	int retval;
+	struct device_node *dt = i2c->dev.of_node;
+
+	/* defer when a default panel is configured but not yet ready */
+	if (syna_tcm_check_dt(dt)) {
+		if (!syna_tcm_check_default_tp(dt, "qcom,i2c-touch-active"))
+			retval = -EPROBE_DEFER;
+		else
+			retval = -ENODEV;
+
+		return retval;
+	}
+
+	syna_tcm_i2c_device = platform_device_alloc(PLATFORM_DRIVER_NAME, 0);
+	if (!syna_tcm_i2c_device) {
+		LOGE(&i2c->dev,
+				"Failed to allocate platform device\n");
+		return -ENOMEM;
+	}
+
+#ifdef CONFIG_OF
+	hw_if.bdata = devm_kzalloc(&i2c->dev, sizeof(*hw_if.bdata), GFP_KERNEL);
+	if (!hw_if.bdata) {
+		LOGE(&i2c->dev,
+				"Failed to allocate memory for board data\n");
+		retval = -ENOMEM;
+		goto err_put_device;
+	}
+	retval = parse_dt(&i2c->dev, hw_if.bdata);
+	if (retval < 0) {
+		LOGE(&i2c->dev, "Failed to parse dt\n");
+		goto err_put_device;
+	}
+#else
+	hw_if.bdata = i2c->dev.platform_data;
+#endif
+
+	bus_io.type = BUS_I2C;
+	bus_io.read = syna_tcm_i2c_read;
+	bus_io.write = syna_tcm_i2c_write;
+	bus_io.rmi_read = syna_tcm_i2c_rmi_read;
+	bus_io.rmi_write = syna_tcm_i2c_rmi_write;
+
+	hw_if.bus_io = &bus_io;
+
+	syna_tcm_i2c_device->dev.parent = &i2c->dev;
+	syna_tcm_i2c_device->dev.platform_data = &hw_if;
+
+	retval = platform_device_add(syna_tcm_i2c_device);
+	if (retval < 0) {
+		LOGE(&i2c->dev,
+				"Failed to add platform device\n");
+		goto err_put_device;
+	}
+
+	return 0;
+
+err_put_device:
+	/* drop the reference taken by platform_device_alloc() */
+	platform_device_put(syna_tcm_i2c_device);
+	syna_tcm_i2c_device = NULL;
+	return retval;
+}
+
+/* I2C remove: unregister the core platform device created in probe. */
+static int syna_tcm_i2c_remove(struct i2c_client *i2c)
+{
+	syna_tcm_i2c_device->dev.platform_data = NULL;
+
+	platform_device_unregister(syna_tcm_i2c_device);
+
+	return 0;
+}
+
+/* Legacy (non-DT) I2C device id match table. */
+static const struct i2c_device_id syna_tcm_id_table[] = {
+	{I2C_MODULE_NAME, 0},
+	{},
+};
+MODULE_DEVICE_TABLE(i2c, syna_tcm_id_table);
+
+#ifdef CONFIG_OF
+/* Device-tree compatible match table. */
+static const struct of_device_id syna_tcm_of_match_table[] = {
+	{
+		.compatible = "synaptics,tcm-i2c",
+	},
+	{},
+};
+MODULE_DEVICE_TABLE(of, syna_tcm_of_match_table);
+#else
+#define syna_tcm_of_match_table NULL
+#endif
+
+/* I2C driver registered via syna_tcm_bus_init(). */
+static struct i2c_driver syna_tcm_i2c_driver = {
+	.driver = {
+		.name = I2C_MODULE_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = syna_tcm_of_match_table,
+	},
+	.probe = syna_tcm_i2c_probe,
+	.remove = syna_tcm_i2c_remove,
+	.id_table = syna_tcm_id_table,
+};
+
+/* Register the I2C bus driver; called by the TCM core module. */
+int syna_tcm_bus_init(void)
+{
+	return i2c_add_driver(&syna_tcm_i2c_driver);
+}
+EXPORT_SYMBOL(syna_tcm_bus_init);
+
+/*
+ * Unregister the I2C bus driver and release the shared bounce buffer.
+ *
+ * Fix: buf and buf_size are now reset after kfree(), so a subsequent
+ * syna_tcm_bus_init()/write cycle cannot see a stale non-zero buf_size
+ * and dereference or double-free the dangling pointer in
+ * syna_tcm_i2c_alloc_mem().
+ */
+void syna_tcm_bus_exit(void)
+{
+	kfree(buf);
+	buf = NULL;
+	buf_size = 0;
+
+	i2c_del_driver(&syna_tcm_i2c_driver);
+}
+EXPORT_SYMBOL(syna_tcm_bus_exit);
+
+MODULE_AUTHOR("Synaptics, Inc.");
+MODULE_DESCRIPTION("Synaptics TCM I2C Bus Module");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/touchscreen/synaptics_tcm/synaptics_tcm_recovery.c b/drivers/input/touchscreen/synaptics_tcm/synaptics_tcm_recovery.c
new file mode 100644
index 0000000..027facb
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_tcm/synaptics_tcm_recovery.c
@@ -0,0 +1,898 @@
+/*
+ * Synaptics TCM touchscreen driver
+ *
+ * Copyright (C) 2017-2019 Synaptics Incorporated. All rights reserved.
+ *
+ * Copyright (C) 2017-2019 Scott Lin <scott.lin@tw.synaptics.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * INFORMATION CONTAINED IN THIS DOCUMENT IS PROVIDED "AS-IS," AND SYNAPTICS
+ * EXPRESSLY DISCLAIMS ALL EXPRESS AND IMPLIED WARRANTIES, INCLUDING ANY
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE,
+ * AND ANY WARRANTIES OF NON-INFRINGEMENT OF ANY INTELLECTUAL PROPERTY RIGHTS.
+ * IN NO EVENT SHALL SYNAPTICS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, PUNITIVE, OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OF THE INFORMATION CONTAINED IN THIS DOCUMENT, HOWEVER CAUSED
+ * AND BASED ON ANY THEORY OF LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHER TORTIOUS ACTION, AND EVEN IF SYNAPTICS WAS ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE. IF A TRIBUNAL OF COMPETENT JURISDICTION DOES
+ * NOT PERMIT THE DISCLAIMER OF DIRECT DAMAGES OR ANY OTHER DAMAGES, SYNAPTICS'
+ * TOTAL CUMULATIVE LIABILITY TO ANY PARTY SHALL NOT EXCEED ONE HUNDRED U.S.
+ * DOLLARS.
+ */
+
+#include "synaptics_tcm_core.h"
+
+#define SET_UP_RECOVERY_MODE true
+
+#define ENABLE_SYSFS_INTERFACE true
+
+#define SYSFS_DIR_NAME "recovery"
+
+#define IHEX_BUF_SIZE (2048 * 1024)
+
+#define DATA_BUF_SIZE (512 * 1024)
+
+#define IHEX_RECORD_SIZE 14
+
+#define PDT_START_ADDR 0x00e9
+
+#define UBL_FN_NUMBER 0x35
+
+#define F35_CHUNK_SIZE 16
+
+#define F35_CHUNK_SIZE_WORDS 8
+
+#define F35_ERASE_ALL_WAIT_MS 5000
+
+#define F35_ERASE_ALL_POLL_MS 100
+
+#define F35_DATA5_OFFSET 5
+
+#define F35_CTRL3_OFFSET 18
+
+#define F35_RESET_COMMAND 16
+
+#define F35_ERASE_ALL_COMMAND 3
+
+#define F35_WRITE_CHUNK_COMMAND 2
+
+#define F35_READ_FLASH_STATUS_COMMAND 1
+
+/*
+ * One RMI Page Description Table entry as laid out in device memory
+ * (read raw from PDT_START_ADDR, hence __packed).
+ */
+struct rmi_pdt_entry {
+	unsigned char query_base_addr;
+	unsigned char command_base_addr;
+	unsigned char control_base_addr;
+	unsigned char data_base_addr;
+	unsigned char intr_src_count:3;
+	unsigned char reserved_1:2;
+	unsigned char fn_version:2;
+	unsigned char reserved_2:1;
+	unsigned char fn_number;
+} __packed;
+
+/* Register base addresses of one RMI function (here: F$35). */
+struct rmi_addr {
+	unsigned short query_base;
+	unsigned short command_base;
+	unsigned short control_base;
+	unsigned short data_base;
+};
+
+/* Private state of the recovery module. */
+struct recovery_hcd {
+	bool set_up_recovery_mode;
+	unsigned char chunk_buf[F35_CHUNK_SIZE + 3];
+	unsigned char out_buf[3];
+	unsigned char *ihex_buf;
+	unsigned char *data_buf;
+	unsigned int ihex_size;
+	unsigned int ihex_records;
+	unsigned int data_entries;
+	struct kobject *sysfs_dir;
+	struct rmi_addr f35_addr;
+	struct syna_tcm_hcd *tcm_hcd;
+};
+
+DECLARE_COMPLETION(recovery_remove_complete);
+
+static struct recovery_hcd *recovery_hcd;
+
+static int recovery_do_recovery(void);
+
+STORE_PROTOTYPE(recovery, recovery);
+
+static struct device_attribute *attrs[] = {
+	ATTRIFY(recovery),
+};
+
+static ssize_t recovery_sysfs_ihex_store(struct file *data_file,
+		struct kobject *kobj, struct bin_attribute *attributes,
+		char *buf, loff_t pos, size_t count);
+
+static struct bin_attribute bin_attr = {
+	.attr = {
+		.name = "ihex",
+		.mode = 0220,
+	},
+	.size = 0,
+	.write = recovery_sysfs_ihex_store,
+};
+
+/*
+ * Sysfs "recovery" store handler.
+ *
+ * Input 1 = recover via the recovery-mode setup path, 2 = recover and
+ * return to application firmware afterwards.  Requires that an ihex
+ * image was previously written through the "ihex" bin attribute.
+ */
+static ssize_t recovery_sysfs_recovery_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned int input;
+	struct syna_tcm_hcd *tcm_hcd = recovery_hcd->tcm_hcd;
+
+	if (kstrtouint(buf, 10, &input))
+		return -EINVAL;
+
+	if (input == 1)
+		recovery_hcd->set_up_recovery_mode = true;
+	else if (input == 2)
+		recovery_hcd->set_up_recovery_mode = false;
+	else
+		return -EINVAL;
+
+	mutex_lock(&tcm_hcd->extif_mutex);
+
+	/* an ihex image must have been staged via the "ihex" bin file */
+	if (recovery_hcd->ihex_size == 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to get ihex data\n");
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	/* records are fixed-size text lines; size must divide evenly */
+	if (recovery_hcd->ihex_size % IHEX_RECORD_SIZE) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Invalid ihex data\n");
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	recovery_hcd->ihex_records = recovery_hcd->ihex_size / IHEX_RECORD_SIZE;
+
+	retval = recovery_do_recovery();
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to do recovery\n");
+		goto exit;
+	}
+
+	retval = count;
+
+exit:
+	/* always restore the default mode selection for the next run */
+	recovery_hcd->set_up_recovery_mode = SET_UP_RECOVERY_MODE;
+
+	mutex_unlock(&tcm_hcd->extif_mutex);
+
+	return retval;
+}
+
+/*
+ * Sysfs "ihex" bin-attribute write handler.
+ *
+ * Accumulates the user-supplied ihex firmware image into ihex_buf and
+ * tracks the running total in ihex_size.  Because bin_attr.size is 0,
+ * the sysfs core does not bound the file offset for us, so pos/count
+ * are validated against IHEX_BUF_SIZE here to avoid an out-of-bounds
+ * write (and the unsigned underflow of IHEX_BUF_SIZE - pos).
+ */
+static ssize_t recovery_sysfs_ihex_store(struct file *data_file,
+		struct kobject *kobj, struct bin_attribute *attributes,
+		char *buf, loff_t pos, size_t count)
+{
+	int retval;
+	struct syna_tcm_hcd *tcm_hcd = recovery_hcd->tcm_hcd;
+
+	mutex_lock(&tcm_hcd->extif_mutex);
+
+	/* reject writes that start or end beyond the ihex buffer */
+	if (pos < 0 || pos >= IHEX_BUF_SIZE ||
+			count > (size_t)(IHEX_BUF_SIZE - pos)) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Invalid ihex data offset/length\n");
+		recovery_hcd->ihex_size = 0;
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	retval = secure_memcpy(&recovery_hcd->ihex_buf[pos],
+			IHEX_BUF_SIZE - pos,
+			buf,
+			count,
+			count);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to copy ihex data\n");
+		recovery_hcd->ihex_size = 0;
+		goto exit;
+	}
+
+	recovery_hcd->ihex_size = pos + count;
+
+	retval = count;
+
+exit:
+	mutex_unlock(&tcm_hcd->extif_mutex);
+
+	return retval;
+}
+
+/*
+ * Reset the device out of F$35 recovery mode by writing the reset
+ * command to F$35 control register 3, then wait reset_delay_ms (from
+ * platform data) for the reset to complete.
+ */
+static int recovery_device_reset(void)
+{
+	int retval;
+	unsigned char command;
+	struct syna_tcm_hcd *tcm_hcd = recovery_hcd->tcm_hcd;
+	const struct syna_tcm_board_data *bdata = tcm_hcd->hw_if->bdata;
+
+	command = F35_RESET_COMMAND;
+
+	retval = syna_tcm_rmi_write(tcm_hcd,
+			recovery_hcd->f35_addr.control_base + F35_CTRL3_OFFSET,
+			&command,
+			sizeof(command));
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to write F$35 command\n");
+		return retval;
+	}
+
+	msleep(bdata->reset_delay_ms);
+
+	return 0;
+}
+
+/*
+ * Append one byte to the staged flash data, guarding against
+ * overflowing the fixed DATA_BUF_SIZE staging buffer.
+ */
+static int recovery_add_data_entry(unsigned char data)
+{
+	struct syna_tcm_hcd *tcm_hcd = recovery_hcd->tcm_hcd;
+
+	if (recovery_hcd->data_entries >= DATA_BUF_SIZE) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Reached data buffer size limit\n");
+		return -EINVAL;
+	}
+
+	recovery_hcd->data_buf[recovery_hcd->data_entries++] = data;
+
+	return 0;
+}
+
+/*
+ * Pad the staged data with 0xffff words until the word count is a
+ * multiple of F35_CHUNK_SIZE_WORDS, so the final flash chunk is
+ * completely filled.  *words is advanced by the words added.
+ */
+static int recovery_add_padding(unsigned int *words)
+{
+	int retval;
+	unsigned int padding;
+	struct syna_tcm_hcd *tcm_hcd = recovery_hcd->tcm_hcd;
+
+	padding = (F35_CHUNK_SIZE_WORDS - *words % F35_CHUNK_SIZE_WORDS);
+	padding %= F35_CHUNK_SIZE_WORDS;
+
+	while (padding) {
+		/* one word = two 0xff bytes */
+		retval = recovery_add_data_entry(0xff);
+		if (retval < 0) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+					"Failed to add data entry\n");
+			return retval;
+		}
+
+		retval = recovery_add_data_entry(0xff);
+		if (retval < 0) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+					"Failed to add data entry\n");
+			return retval;
+		}
+
+		(*words)++;
+		padding--;
+	}
+
+	return 0;
+}
+
+/*
+ * Convert the staged ihex text image in ihex_buf into the raw byte
+ * stream in data_buf that recovery_write_flash() sends to the device.
+ *
+ * Every record is a fixed IHEX_RECORD_SIZE bytes of text of the form
+ * ":CC AAAA TT D0 D1" (count, address, type, two data bytes).  Type
+ * 0x00 records carry data; type 0x02 records set an extended segment
+ * address offset.  At each chunk boundary (F35_CHUNK_SIZE_WORDS
+ * words) the 16-byte-aligned target address is emitted ahead of the
+ * data bytes.
+ */
+static int recovery_parse_ihex(void)
+{
+	int retval;
+	unsigned char colon;
+	unsigned char *buf;
+	unsigned int addr;
+	unsigned int type;
+	unsigned int addrl;
+	unsigned int addrh;
+	unsigned int data0;
+	unsigned int data1;
+	unsigned int count;
+	unsigned int words;
+	unsigned int offset;
+	unsigned int record;
+	struct syna_tcm_hcd *tcm_hcd = recovery_hcd->tcm_hcd;
+
+	words = 0;
+
+	offset = 0;
+
+	buf = recovery_hcd->ihex_buf;
+
+	recovery_hcd->data_entries = 0;
+
+	for (record = 0; record < recovery_hcd->ihex_records; record++) {
+		/* NUL-terminate the record in place so sscanf stops at its end */
+		buf[(record + 1) * IHEX_RECORD_SIZE - 1] = 0x00;
+		retval = sscanf(&buf[record * IHEX_RECORD_SIZE],
+				"%c%02x%02x%02x%02x%02x%02x",
+				&colon,
+				&count,
+				&addrh,
+				&addrl,
+				&type,
+				&data0,
+				&data1);
+		if (retval != 7) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+					"Failed to read ihex record\n");
+			return -EINVAL;
+		}
+
+		if (type == 0x00) {
+			if ((words % F35_CHUNK_SIZE_WORDS) == 0) {
+				/* emit the 16-byte-aligned flash address (little end first) */
+				addr = (addrh << 8) + addrl;
+				addr += offset;
+				addr >>= 4;
+
+				retval = recovery_add_data_entry(addr);
+				if (retval < 0) {
+					LOGE(tcm_hcd->pdev->dev.parent,
+						"Failed to add data entry\n");
+					return retval;
+				}
+
+				retval = recovery_add_data_entry(addr >> 8);
+				if (retval < 0) {
+					LOGE(tcm_hcd->pdev->dev.parent,
+						"Failed to add data entry\n");
+					return retval;
+				}
+			}
+
+			retval = recovery_add_data_entry(data0);
+			if (retval < 0) {
+				LOGE(tcm_hcd->pdev->dev.parent,
+						"Failed to add data entry\n");
+				return retval;
+			}
+
+			retval = recovery_add_data_entry(data1);
+			if (retval < 0) {
+				LOGE(tcm_hcd->pdev->dev.parent,
+						"Failed to add data entry\n");
+				return retval;
+			}
+
+			words++;
+		} else if (type == 0x02) {
+			/* extended segment address record: pad out the current
+			 * chunk, then switch to the new address offset
+			 */
+			retval = recovery_add_padding(&words);
+			if (retval < 0) {
+				LOGE(tcm_hcd->pdev->dev.parent,
+						"Failed to add padding\n");
+				return retval;
+			}
+
+			offset = (data0 << 8) + data1;
+			offset <<= 4;
+		}
+	}
+
+	/* fill the last chunk so every transfer is full-sized */
+	retval = recovery_add_padding(&words);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to add padding\n");
+		return retval;
+	}
+
+	return 0;
+}
+
+/*
+ * Read the F$35 status register and verify that the low five status
+ * bits report no error (0x00).
+ */
+static int recovery_check_status(void)
+{
+	int retval;
+	unsigned char status;
+	struct syna_tcm_hcd *tcm_hcd = recovery_hcd->tcm_hcd;
+
+	retval = syna_tcm_rmi_read(tcm_hcd,
+			recovery_hcd->f35_addr.data_base,
+			&status,
+			sizeof(status));
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to read status\n");
+		return retval;
+	}
+
+	/* only the low five bits carry the recovery status */
+	status = status & 0x1f;
+
+	if (status != 0x00) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Recovery mode status = 0x%02x\n",
+				status);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * Stream the staged data_buf to flash through F$35, one chunk at a
+ * time.  Each transfer is chunk_buf: a 2-byte chunk address plus
+ * F35_CHUNK_SIZE data bytes, with the write-chunk command appended as
+ * the final byte.  Ends with a status check.
+ */
+static int recovery_write_flash(void)
+{
+	int retval;
+	unsigned char *data_ptr;
+	unsigned int chunk_buf_size;
+	unsigned int chunk_data_size;
+	unsigned int entries_written;
+	unsigned int entries_to_write;
+	struct syna_tcm_hcd *tcm_hcd = recovery_hcd->tcm_hcd;
+
+	entries_written = 0;
+
+	data_ptr = recovery_hcd->data_buf;
+
+	chunk_buf_size = sizeof(recovery_hcd->chunk_buf);
+
+	/* NOTE(review): chunk_data_size is never used below;
+	 * entries_to_write is the equivalent fixed chunk payload
+	 */
+	chunk_data_size = chunk_buf_size - 1;
+
+	/* the last byte of every transfer carries the write command */
+	recovery_hcd->chunk_buf[chunk_buf_size - 1] = F35_WRITE_CHUNK_COMMAND;
+
+	while (entries_written < recovery_hcd->data_entries) {
+		/* 2 address bytes + F35_CHUNK_SIZE data bytes per chunk;
+		 * recovery_add_padding() guarantees full chunks
+		 */
+		entries_to_write = F35_CHUNK_SIZE + 2;
+
+		retval = secure_memcpy(recovery_hcd->chunk_buf,
+				chunk_buf_size - 1,
+				data_ptr,
+				recovery_hcd->data_entries - entries_written,
+				entries_to_write);
+		if (retval < 0) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+					"Failed to copy chunk data\n");
+			return retval;
+		}
+
+		retval = syna_tcm_rmi_write(tcm_hcd,
+				recovery_hcd->f35_addr.control_base,
+				recovery_hcd->chunk_buf,
+				chunk_buf_size);
+		if (retval < 0) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+					"Failed to write chunk data\n");
+			return retval;
+		}
+
+		data_ptr += entries_to_write;
+		entries_written += entries_to_write;
+	}
+
+	retval = recovery_check_status();
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+			"Failed to get no error recovery mode status\n");
+		return retval;
+	}
+
+	return 0;
+}
+
+/*
+ * Poll for completion of an erase-all operation.
+ *
+ * Outer loop: issue the read-flash-status command and, once the
+ * command register clears, check the busy bit in F$35 data register
+ * 5; inner loop: wait for the command register to clear.  Both loops
+ * share one timeout budget of roughly F35_ERASE_ALL_WAIT_MS, decremented
+ * in F35_ERASE_ALL_POLL_MS steps.
+ */
+static int recovery_poll_erase_completion(void)
+{
+	int retval;
+	unsigned char status;
+	unsigned char command;
+	unsigned char data_base;
+	unsigned int timeout;
+	struct syna_tcm_hcd *tcm_hcd = recovery_hcd->tcm_hcd;
+
+	timeout = F35_ERASE_ALL_WAIT_MS;
+
+	data_base = recovery_hcd->f35_addr.data_base;
+
+	do {
+		command = F35_READ_FLASH_STATUS_COMMAND;
+
+		retval = syna_tcm_rmi_write(tcm_hcd,
+				recovery_hcd->f35_addr.command_base,
+				&command,
+				sizeof(command));
+		if (retval < 0) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+					"Failed to write F$35 command\n");
+			return retval;
+		}
+
+		/* wait for the device to consume the command */
+		do {
+			retval = syna_tcm_rmi_read(tcm_hcd,
+					recovery_hcd->f35_addr.command_base,
+					&command,
+					sizeof(command));
+			if (retval < 0) {
+				LOGE(tcm_hcd->pdev->dev.parent,
+					"Failed to read command status\n");
+				return retval;
+			}
+
+			if (command == 0x00)
+				break;
+
+			if (timeout == 0)
+				break;
+
+			msleep(F35_ERASE_ALL_POLL_MS);
+			timeout -= F35_ERASE_ALL_POLL_MS;
+		} while (true);
+
+		/* command never cleared within the timeout budget */
+		if (command != 0 && timeout == 0) {
+			retval = -EINVAL;
+			goto exit;
+		}
+
+		retval = syna_tcm_rmi_read(tcm_hcd,
+				data_base + F35_DATA5_OFFSET,
+				&status,
+				sizeof(status));
+		if (retval < 0) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+					"Failed to read flash status\n");
+			return retval;
+		}
+
+		/* bit 0 = flash busy; clear means the erase finished */
+		if ((status & 0x01) == 0x00)
+			break;
+
+		if (timeout == 0) {
+			retval = -EINVAL;
+			goto exit;
+		}
+
+		msleep(F35_ERASE_ALL_POLL_MS);
+		timeout -= F35_ERASE_ALL_POLL_MS;
+	} while (true);
+
+	retval = 0;
+
+exit:
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to get erase completion\n");
+	}
+
+	return retval;
+}
+
+/*
+ * Erase the entire flash via F$35.  If the bootloader exposes a
+ * command register the erase is polled for completion; otherwise a
+ * fixed worst-case delay is used.  Ends with a status check.
+ */
+static int recovery_erase_flash(void)
+{
+	int retval;
+	unsigned char command;
+	struct syna_tcm_hcd *tcm_hcd = recovery_hcd->tcm_hcd;
+
+	command = F35_ERASE_ALL_COMMAND;
+
+	retval = syna_tcm_rmi_write(tcm_hcd,
+			recovery_hcd->f35_addr.control_base + F35_CTRL3_OFFSET,
+			&command,
+			sizeof(command));
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to write F$35 command\n");
+		return retval;
+	}
+
+	if (recovery_hcd->f35_addr.command_base) {
+		retval = recovery_poll_erase_completion();
+		if (retval < 0) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to wait for erase completion\n");
+			return retval;
+		}
+	} else {
+		msleep(F35_ERASE_ALL_WAIT_MS);
+	}
+
+	retval = recovery_check_status();
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+			"Failed to get no error recovery mode status\n");
+		return retval;
+	}
+
+	return 0;
+}
+
+/*
+ * Put the device into ROM bootloader (recovery) mode: identify the
+ * current mode, drop from application to bootloader mode if needed,
+ * then send CMD_REBOOT_TO_ROM_BOOTLOADER (pre-staged in out_buf at
+ * init time) and wait for the reset to complete.
+ */
+static int recovery_set_up_recovery_mode(void)
+{
+	int retval;
+	struct syna_tcm_hcd *tcm_hcd = recovery_hcd->tcm_hcd;
+	const struct syna_tcm_board_data *bdata = tcm_hcd->hw_if->bdata;
+
+	retval = tcm_hcd->identify(tcm_hcd, true);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to do identification\n");
+		return retval;
+	}
+
+	if (tcm_hcd->id_info.mode == MODE_APPLICATION) {
+		retval = tcm_hcd->switch_mode(tcm_hcd, FW_MODE_BOOTLOADER);
+		if (retval < 0) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to enter bootloader mode\n");
+			return retval;
+		}
+	}
+
+	/* out_buf[0] = command, out_buf[1..2] = 2-byte payload */
+	retval = tcm_hcd->write_message(tcm_hcd,
+			recovery_hcd->out_buf[0],
+			&recovery_hcd->out_buf[1],
+			2,
+			NULL,
+			NULL,
+			NULL,
+			NULL,
+			0);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to write command %s\n",
+				STR(CMD_REBOOT_TO_ROM_BOOTLOADER));
+		return retval;
+	}
+
+	msleep(bdata->reset_delay_ms);
+
+	return 0;
+}
+
+/*
+ * Top-level recovery sequence: parse the staged ihex image, enter
+ * recovery mode if requested, locate F$35 through the PDT, erase and
+ * rewrite flash, reset the device, and (when not set up for staying
+ * in recovery mode) bring application firmware back up.
+ *
+ * Caller must hold extif_mutex (see recovery_sysfs_recovery_store).
+ */
+static int recovery_do_recovery(void)
+{
+	int retval;
+	struct rmi_pdt_entry p_entry;
+	struct syna_tcm_hcd *tcm_hcd = recovery_hcd->tcm_hcd;
+
+	retval = recovery_parse_ihex();
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to parse ihex data\n");
+		return retval;
+	}
+
+	if (recovery_hcd->set_up_recovery_mode) {
+		retval = recovery_set_up_recovery_mode();
+		if (retval < 0) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+					"Failed to set up recovery mode\n");
+			return retval;
+		}
+	}
+
+	/* keep the watchdog from interfering while flash is rewritten */
+	tcm_hcd->update_watchdog(tcm_hcd, false);
+
+	retval = syna_tcm_rmi_read(tcm_hcd,
+			PDT_START_ADDR,
+			(unsigned char *)&p_entry,
+			sizeof(p_entry));
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to read PDT entry\n");
+		return retval;
+	}
+
+	if (p_entry.fn_number != UBL_FN_NUMBER) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to find F$35\n");
+		return -ENODEV;
+	}
+
+	recovery_hcd->f35_addr.query_base = p_entry.query_base_addr;
+	recovery_hcd->f35_addr.command_base = p_entry.command_base_addr;
+	recovery_hcd->f35_addr.control_base = p_entry.control_base_addr;
+	recovery_hcd->f35_addr.data_base = p_entry.data_base_addr;
+
+	LOGN(tcm_hcd->pdev->dev.parent,
+			"Start of recovery\n");
+
+	retval = recovery_erase_flash();
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to erase flash\n");
+		return retval;
+	}
+
+	LOGN(tcm_hcd->pdev->dev.parent,
+			"Flash erased\n");
+
+	retval = recovery_write_flash();
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to write to flash\n");
+		return retval;
+	}
+
+	LOGN(tcm_hcd->pdev->dev.parent,
+			"Flash written\n");
+
+	retval = recovery_device_reset();
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to do reset\n");
+		return retval;
+	}
+
+	LOGN(tcm_hcd->pdev->dev.parent,
+			"End of recovery\n");
+
+	/* in set-up-recovery-mode flow the device stays as-is */
+	if (recovery_hcd->set_up_recovery_mode)
+		return 0;
+
+	tcm_hcd->update_watchdog(tcm_hcd, true);
+
+	retval = tcm_hcd->enable_irq(tcm_hcd, true, NULL);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to enable interrupt\n");
+		return retval;
+	}
+
+	retval = tcm_hcd->identify(tcm_hcd, true);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to do identification\n");
+		return retval;
+	}
+
+	if (tcm_hcd->id_info.mode != MODE_APPLICATION) {
+		retval = tcm_hcd->switch_mode(tcm_hcd, FW_MODE_APPLICATION);
+		if (retval < 0) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+					"Failed to run application firmware\n");
+			return retval;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Allocate the recovery module state, stage the reboot-to-bootloader
+ * command bytes, and (optionally) create the sysfs interface.
+ *
+ * Fix: the two kzalloc-failure paths previously jumped to the error
+ * unwinding labels with "retval" uninitialized, returning a garbage
+ * status; they now set -ENOMEM explicitly.
+ */
+static int recovery_init(struct syna_tcm_hcd *tcm_hcd)
+{
+	int retval;
+	int idx;
+
+	recovery_hcd = kzalloc(sizeof(*recovery_hcd), GFP_KERNEL);
+	if (!recovery_hcd) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to allocate memory for recovery_hcd\n");
+		return -ENOMEM;
+	}
+
+	recovery_hcd->ihex_buf = kzalloc(IHEX_BUF_SIZE, GFP_KERNEL);
+	if (!recovery_hcd->ihex_buf) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+			"Failed to allocate memory for ihex_buf\n");
+		retval = -ENOMEM;
+		goto err_allocate_ihex_buf;
+	}
+
+	recovery_hcd->data_buf = kzalloc(DATA_BUF_SIZE, GFP_KERNEL);
+	if (!recovery_hcd->data_buf) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+			"Failed to allocate memory for data_buf\n");
+		retval = -ENOMEM;
+		goto err_allocate_data_buf;
+	}
+
+	recovery_hcd->tcm_hcd = tcm_hcd;
+
+	recovery_hcd->set_up_recovery_mode = SET_UP_RECOVERY_MODE;
+
+	/* pre-stage the reboot-to-ROM-bootloader message (command + 2
+	 * payload bytes), sent by recovery_set_up_recovery_mode()
+	 */
+	recovery_hcd->out_buf[0] = CMD_REBOOT_TO_ROM_BOOTLOADER;
+	recovery_hcd->out_buf[1] = 0;
+	recovery_hcd->out_buf[2] = 0;
+
+	if (!ENABLE_SYSFS_INTERFACE)
+		return 0;
+
+	recovery_hcd->sysfs_dir = kobject_create_and_add(SYSFS_DIR_NAME,
+			tcm_hcd->sysfs_dir);
+	if (!recovery_hcd->sysfs_dir) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to create sysfs directory\n");
+		retval = -EINVAL;
+		goto err_sysfs_create_dir;
+	}
+
+	for (idx = 0; idx < ARRAY_SIZE(attrs); idx++) {
+		retval = sysfs_create_file(recovery_hcd->sysfs_dir,
+				&(*attrs[idx]).attr);
+		if (retval < 0) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+					"Failed to create sysfs file\n");
+			goto err_sysfs_create_file;
+		}
+	}
+
+	retval = sysfs_create_bin_file(recovery_hcd->sysfs_dir, &bin_attr);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to create sysfs bin file\n");
+		goto err_sysfs_create_bin_file;
+	}
+
+	return 0;
+
+err_sysfs_create_bin_file:
+err_sysfs_create_file:
+	/* remove only the attrs created so far (idx is one past them) */
+	for (idx--; idx >= 0; idx--)
+		sysfs_remove_file(recovery_hcd->sysfs_dir, &(*attrs[idx]).attr);
+
+	kobject_put(recovery_hcd->sysfs_dir);
+
+err_sysfs_create_dir:
+	kfree(recovery_hcd->data_buf);
+err_allocate_data_buf:
+	kfree(recovery_hcd->ihex_buf);
+err_allocate_ihex_buf:
+	kfree(recovery_hcd);
+	recovery_hcd = NULL;
+
+	return retval;
+}
+
+/*
+ * Tear down the sysfs interface and free all module state, then
+ * signal recovery_module_exit() that removal is complete.
+ */
+static int recovery_remove(struct syna_tcm_hcd *tcm_hcd)
+{
+	int idx;
+
+	if (!recovery_hcd)
+		goto exit;
+
+	if (ENABLE_SYSFS_INTERFACE) {
+		sysfs_remove_bin_file(recovery_hcd->sysfs_dir, &bin_attr);
+
+		for (idx = 0; idx < ARRAY_SIZE(attrs); idx++) {
+			sysfs_remove_file(recovery_hcd->sysfs_dir,
+					&(*attrs[idx]).attr);
+		}
+
+		kobject_put(recovery_hcd->sysfs_dir);
+	}
+
+	kfree(recovery_hcd->data_buf);
+	kfree(recovery_hcd->ihex_buf);
+	kfree(recovery_hcd);
+	recovery_hcd = NULL;
+
+exit:
+	complete(&recovery_remove_complete);
+
+	return 0;
+}
+
+/*
+ * Reset callback: the module keeps no per-reset state, so all that is
+ * needed is to make sure it has been initialized.
+ */
+static int recovery_reset(struct syna_tcm_hcd *tcm_hcd)
+{
+	if (recovery_hcd)
+		return 0;
+
+	return recovery_init(tcm_hcd);
+}
+
+/* Module registration descriptor handed to the TCM core. */
+static struct syna_tcm_module_cb recovery_module = {
+	.type = TCM_RECOVERY,
+	.init = recovery_init,
+	.remove = recovery_remove,
+	.syncbox = NULL,
+	.asyncbox = NULL,
+	.reset = recovery_reset,
+	.suspend = NULL,
+	.resume = NULL,
+	.early_suspend = NULL,
+};
+
+/* Register this module with the TCM core on load. */
+static int __init recovery_module_init(void)
+{
+	return syna_tcm_add_module(&recovery_module, true);
+}
+
+/*
+ * Deregister on unload, then wait for recovery_remove() to signal
+ * that teardown has finished before letting the module go away.
+ */
+static void __exit recovery_module_exit(void)
+{
+	syna_tcm_add_module(&recovery_module, false);
+
+	wait_for_completion(&recovery_remove_complete);
+}
+
+module_init(recovery_module_init);
+module_exit(recovery_module_exit);
+
+MODULE_AUTHOR("Synaptics, Inc.");
+MODULE_DESCRIPTION("Synaptics TCM Recovery Module");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/touchscreen/synaptics_tcm/synaptics_tcm_reflash.c b/drivers/input/touchscreen/synaptics_tcm/synaptics_tcm_reflash.c
new file mode 100644
index 0000000..934d090
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_tcm/synaptics_tcm_reflash.c
@@ -0,0 +1,2193 @@
+/*
+ * Synaptics TCM touchscreen driver
+ *
+ * Copyright (C) 2017-2019 Synaptics Incorporated. All rights reserved.
+ *
+ * Copyright (C) 2017-2019 Scott Lin <scott.lin@tw.synaptics.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * INFORMATION CONTAINED IN THIS DOCUMENT IS PROVIDED "AS-IS," AND SYNAPTICS
+ * EXPRESSLY DISCLAIMS ALL EXPRESS AND IMPLIED WARRANTIES, INCLUDING ANY
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE,
+ * AND ANY WARRANTIES OF NON-INFRINGEMENT OF ANY INTELLECTUAL PROPERTY RIGHTS.
+ * IN NO EVENT SHALL SYNAPTICS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, PUNITIVE, OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OF THE INFORMATION CONTAINED IN THIS DOCUMENT, HOWEVER CAUSED
+ * AND BASED ON ANY THEORY OF LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHER TORTIOUS ACTION, AND EVEN IF SYNAPTICS WAS ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE. IF A TRIBUNAL OF COMPETENT JURISDICTION DOES
+ * NOT PERMIT THE DISCLAIMER OF DIRECT DAMAGES OR ANY OTHER DAMAGES, SYNAPTICS'
+ * TOTAL CUMULATIVE LIABILITY TO ANY PARTY SHALL NOT EXCEED ONE HUNDRED U.S.
+ * DOLLARS.
+ */
+
+#include <linux/crc32.h>
+#include <linux/firmware.h>
+#include "synaptics_tcm_core.h"
+
+#define STARTUP_REFLASH
+
+#define FORCE_REFLASH false
+
+#define ENABLE_SYSFS_INTERFACE true
+
+#define SYSFS_DIR_NAME "reflash"
+
+#define CUSTOM_DIR_NAME "custom"
+
+#define FW_IMAGE_NAME "synaptics_firmware.img"
+
+#define BOOT_CONFIG_ID "BOOT_CONFIG"
+
+#define APP_CODE_ID "APP_CODE"
+
+#define PROD_TEST_ID "APP_PROD_TEST"
+
+#define APP_CONFIG_ID "APP_CONFIG"
+
+#define DISP_CONFIG_ID "DISPLAY"
+
+#define FB_READY_COUNT 2
+
+#define FB_READY_WAIT_MS 100
+
+#define FB_READY_TIMEOUT_S 80
+
+#define IMAGE_FILE_MAGIC_VALUE 0x4818472b
+
+#define FLASH_AREA_MAGIC_VALUE 0x7c05e516
+
+#define BOOT_CONFIG_SIZE 8
+
+#define BOOT_CONFIG_SLOTS 16
+
+#define IMAGE_BUF_SIZE (512 * 1024)
+
+#define ERASE_FLASH_DELAY_MS 500
+
+#define WRITE_FLASH_DELAY_MS 20
+
+#define REFLASH (1 << 0)
+
+#define FORCE_UPDATE (1 << 1)
+
+#define APP_CFG_UPDATE (1 << 2)
+
+#define DISP_CFG_UPDATE (1 << 3)
+
+#define BOOT_CFG_UPDATE (1 << 4)
+
+#define BOOT_CFG_LOCKDOWN (1 << 5)
+
+/*
+ * Generate reflash_write_<area>(): write the area's image data to its
+ * flash address using the shared reflash_write_flash() helper.
+ */
+#define reflash_write(p_name) \
+static int reflash_write_##p_name(void) \
+{ \
+	int retval; \
+	unsigned int size; \
+	unsigned int flash_addr; \
+	struct syna_tcm_hcd *tcm_hcd = reflash_hcd->tcm_hcd; \
+	const unsigned char *data; \
+\
+	data = reflash_hcd->image_info.p_name.data; \
+	size = reflash_hcd->image_info.p_name.size; \
+	flash_addr = reflash_hcd->image_info.p_name.flash_addr; \
+\
+	retval = reflash_write_flash(flash_addr, data, size); \
+	if (retval < 0) { \
+		LOGE(tcm_hcd->pdev->dev.parent, \
+				"Failed to write to flash\n"); \
+		return retval; \
+	} \
+\
+	return 0; \
+}
+
+/*
+ * Generate reflash_erase_<area>(): erase the flash pages spanned by
+ * the area's image data (rounding the size up to whole pages).
+ */
+#define reflash_erase(p_name) \
+static int reflash_erase_##p_name(void) \
+{ \
+	int retval; \
+	unsigned int size; \
+	unsigned int flash_addr; \
+	unsigned int page_start; \
+	unsigned int page_count; \
+	struct syna_tcm_hcd *tcm_hcd = reflash_hcd->tcm_hcd; \
+\
+	flash_addr = reflash_hcd->image_info.p_name.flash_addr; \
+\
+	page_start = flash_addr / reflash_hcd->page_size; \
+\
+	size = reflash_hcd->image_info.p_name.size; \
+	page_count = ceil_div(size, reflash_hcd->page_size); \
+\
+	LOGD(tcm_hcd->pdev->dev.parent, \
+			"Page start = %d\n", \
+			page_start); \
+\
+	LOGD(tcm_hcd->pdev->dev.parent, \
+			"Page count = %d\n", \
+			page_count); \
+\
+	retval = reflash_erase_flash(page_start, page_count); \
+	if (retval < 0) { \
+		LOGE(tcm_hcd->pdev->dev.parent, \
+				"Failed to erase flash pages\n"); \
+		return retval; \
+	} \
+\
+	return 0; \
+}
+
+/*
+ * Generate reflash_update_<area>(): enter flash access mode, disable
+ * the watchdog, then check, erase and rewrite the area.  On any
+ * failure (or when the caller passes reset=true) the device is reset
+ * before the watchdog is re-enabled.
+ */
+#define reflash_update(p_name) \
+static int reflash_update_##p_name(bool reset) \
+{ \
+	int retval; \
+	struct syna_tcm_hcd *tcm_hcd = reflash_hcd->tcm_hcd; \
+\
+	retval = reflash_set_up_flash_access(); \
+	if (retval < 0) { \
+		LOGE(tcm_hcd->pdev->dev.parent, \
+				"Failed to set up flash access\n"); \
+		return retval; \
+	} \
+\
+	tcm_hcd->update_watchdog(tcm_hcd, false); \
+\
+	retval = reflash_check_##p_name(); \
+	if (retval < 0) { \
+		LOGE(tcm_hcd->pdev->dev.parent, \
+				"Failed "#p_name" partition check\n"); \
+		reset = true; \
+		goto reset; \
+	} \
+\
+	retval = reflash_erase_##p_name(); \
+	if (retval < 0) { \
+		LOGE(tcm_hcd->pdev->dev.parent, \
+				"Failed to erase "#p_name" partition\n"); \
+		reset = true; \
+		goto reset; \
+	} \
+\
+	LOGN(tcm_hcd->pdev->dev.parent, \
+			"Partition erased ("#p_name")\n"); \
+\
+	retval = reflash_write_##p_name(); \
+	if (retval < 0) { \
+		LOGE(tcm_hcd->pdev->dev.parent, \
+				"Failed to write "#p_name" partition\n"); \
+		reset = true; \
+		goto reset; \
+	} \
+\
+	LOGN(tcm_hcd->pdev->dev.parent, \
+			"Partition written ("#p_name")\n"); \
+\
+	retval = 0; \
+\
+reset: \
+	if (!reset) \
+		goto exit; \
+\
+	if (tcm_hcd->reset(tcm_hcd, false, true) < 0) { \
+		LOGE(tcm_hcd->pdev->dev.parent, \
+				"Failed to do reset\n"); \
+	} \
+\
+exit: \
+	tcm_hcd->update_watchdog(tcm_hcd, true); \
+\
+	return retval; \
+}
+
+/*
+ * Copy up to "count" bytes at offset "pos" from the shared read
+ * buffer into the caller's sysfs buffer.  Expansion-site contract:
+ * retval, readlen, buf, pos, count and tcm_hcd must be in scope.
+ */
+#define reflash_show_data() \
+{ \
+	LOCK_BUFFER(reflash_hcd->read); \
+\
+	readlen = MIN(count, reflash_hcd->read.data_length - pos); \
+\
+	retval = secure_memcpy(buf, \
+			count, \
+			&reflash_hcd->read.buf[pos], \
+			reflash_hcd->read.buf_size - pos, \
+			readlen); \
+	if (retval < 0) { \
+		LOGE(tcm_hcd->pdev->dev.parent, \
+			"Failed to copy read data\n"); \
+	} else { \
+		retval = readlen; \
+	} \
+\
+	UNLOCK_BUFFER(reflash_hcd->read); \
+}
+
+enum update_area {
+	NONE = 0,
+	FIRMWARE_CONFIG,
+	CONFIG_ONLY,
+};
+
+struct app_config_header {
+	unsigned short magic_value[4];
+	unsigned char checksum[4];
+	unsigned char length[2];
+	unsigned char build_id[4];
+	unsigned char customer_config_id[16];
+};
+
+struct area_descriptor {
+	unsigned char magic_value[4];
+	unsigned char id_string[16];
+	unsigned char flags[4];
+	unsigned char flash_addr_words[4];
+	unsigned char length[4];
+	unsigned char checksum[4];
+};
+
+struct block_data {
+	const unsigned char *data;
+	unsigned int size;
+	unsigned int flash_addr;
+};
+
+struct image_info {
+	struct block_data boot_config;
+	struct block_data app_firmware;
+	struct block_data prod_test_firmware;
+	struct block_data app_config;
+	struct block_data disp_config;
+};
+
+struct image_header {
+	unsigned char magic_value[4];
+	unsigned char num_of_areas[4];
+};
+
+struct boot_config {
+	union {
+		unsigned char i2c_address;
+		struct {
+			unsigned char cpha:1;
+			unsigned char cpol:1;
+			unsigned char word0_b2__7:6;
+		} __packed;
+	};
+	unsigned char attn_polarity:1;
+	unsigned char attn_drive:2;
+	unsigned char attn_pullup:1;
+	unsigned char word0_b12__14:3;
+	unsigned char used:1;
+	unsigned short customer_part_id;
+	unsigned short boot_timeout;
+	unsigned short continue_on_reset:1;
+	unsigned short word3_b1__15:15;
+} __packed;
+
+struct reflash_hcd {
+	bool force_update;
+	bool disp_cfg_update;
+	const unsigned char *image;
+	unsigned char *image_buf;
+	unsigned int image_size;
+	unsigned int page_size;
+	unsigned int write_block_size;
+	unsigned int max_write_payload_size;
+	const struct firmware *fw_entry;
+	struct mutex reflash_mutex;
+	struct kobject *sysfs_dir;
+	struct kobject *custom_dir;
+	struct work_struct work;
+	struct workqueue_struct *workqueue;
+	struct image_info image_info;
+	struct syna_tcm_buffer out;
+	struct syna_tcm_buffer resp;
+	struct syna_tcm_buffer read;
+	struct syna_tcm_hcd *tcm_hcd;
+};
+
+DECLARE_COMPLETION(reflash_remove_complete);
+
+static struct reflash_hcd *reflash_hcd;
+
+static int reflash_get_fw_image(void);
+
+static int reflash_read_data(enum flash_area area, bool run_app_firmware,
+		struct syna_tcm_buffer *output);
+
+static int reflash_update_custom_otp(const unsigned char *data,
+		unsigned int offset, unsigned int datalen);
+
+static int reflash_update_custom_lcm(const unsigned char *data,
+		unsigned int offset, unsigned int datalen);
+
+static int reflash_update_custom_oem(const unsigned char *data,
+		unsigned int offset, unsigned int datalen);
+
+static int reflash_update_boot_config(bool lock);
+
+static int reflash_update_app_config(bool reset);
+
+static int reflash_update_disp_config(bool reset);
+
+static int reflash_do_reflash(void);
+
+STORE_PROTOTYPE(reflash, reflash);
+
+static struct device_attribute *attrs[] = {
+	ATTRIFY(reflash),
+};
+
+static ssize_t reflash_sysfs_image_store(struct file *data_file,
+		struct kobject *kobj, struct bin_attribute *attributes,
+		char *buf, loff_t pos, size_t count);
+
+static ssize_t reflash_sysfs_lockdown_show(struct file *data_file,
+		struct kobject *kobj, struct bin_attribute *attributes,
+		char *buf, loff_t pos, size_t count);
+
+static ssize_t reflash_sysfs_lockdown_store(struct file *data_file,
+		struct kobject *kobj, struct bin_attribute *attributes,
+		char *buf, loff_t pos, size_t count);
+
+static ssize_t reflash_sysfs_lcm_show(struct file *data_file,
+		struct kobject *kobj, struct bin_attribute *attributes,
+		char *buf, loff_t pos, size_t count);
+
+static ssize_t reflash_sysfs_lcm_store(struct file *data_file,
+		struct kobject *kobj, struct bin_attribute *attributes,
+		char *buf, loff_t pos, size_t count);
+
+static ssize_t reflash_sysfs_oem_show(struct file *data_file,
+		struct kobject *kobj, struct bin_attribute *attributes,
+		char *buf, loff_t pos, size_t count);
+
+static ssize_t reflash_sysfs_oem_store(struct file *data_file,
+		struct kobject *kobj, struct bin_attribute *attributes,
+		char *buf, loff_t pos, size_t count);
+
+/*
+ * Binary sysfs attributes: "image" receives the firmware image to flash
+ * (write-only, 0220); "lockdown", "lcm" and "oem" expose the corresponding
+ * custom flash areas for read and write (0664). .size = 0 means no fixed
+ * file size is advertised to userspace.
+ */
+static struct bin_attribute bin_attrs[] = {
+	{
+		.attr = {
+			.name = "image",
+			.mode = 0220,
+		},
+		.size = 0,
+		.write = reflash_sysfs_image_store,
+	},
+	{
+		.attr = {
+			.name = "lockdown",
+			.mode = 0664,
+		},
+		.size = 0,
+		.read = reflash_sysfs_lockdown_show,
+		.write = reflash_sysfs_lockdown_store,
+	},
+	{
+		.attr = {
+			.name = "lcm",
+			.mode = 0664,
+		},
+		.size = 0,
+		.read = reflash_sysfs_lcm_show,
+		.write = reflash_sysfs_lcm_store,
+	},
+	{
+		.attr = {
+			.name = "oem",
+			.mode = 0664,
+		},
+		.size = 0,
+		.read = reflash_sysfs_oem_show,
+		.write = reflash_sysfs_oem_store,
+	},
+};
+
+/*
+ * Sysfs "reflash" store handler.
+ *
+ * 'buf' carries a decimal bitmask selecting the update actions (REFLASH,
+ * FORCE_UPDATE, BOOT_CFG_LOCKDOWN, BOOT_CFG_UPDATE, DISP_CFG_UPDATE,
+ * APP_CFG_UPDATE). The handler holds the external-interface and reflash
+ * mutexes and a wakeup source for the whole operation. Returns 'count' on
+ * success or a negative errno.
+ */
+static ssize_t reflash_sysfs_reflash_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int retval;
+	unsigned int input;
+	struct syna_tcm_hcd *tcm_hcd = reflash_hcd->tcm_hcd;
+
+	if (kstrtouint(buf, 10, &input))
+		return -EINVAL;
+
+	mutex_lock(&tcm_hcd->extif_mutex);
+
+	pm_stay_awake(&tcm_hcd->pdev->dev);
+
+	mutex_lock(&reflash_hcd->reflash_mutex);
+
+	/* Prefer an image previously uploaded through the "image" bin file
+	 * over requesting one from the firmware loader.
+	 */
+	if (reflash_hcd->image_size != 0)
+		reflash_hcd->image = reflash_hcd->image_buf;
+
+	reflash_hcd->force_update = input & FORCE_UPDATE ? true : false;
+
+	if (input & REFLASH || input & FORCE_UPDATE) {
+		retval = reflash_do_reflash();
+		if (retval < 0) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+					"Failed to do reflash\n");
+			goto exit;
+		}
+	}
+
+	/* Nothing but a full reflash was requested: done. */
+	if ((input & ~(REFLASH | FORCE_UPDATE)) == 0) {
+		retval = count;
+		goto exit;
+	}
+
+	retval = reflash_get_fw_image();
+	if (retval < 0) {
+		LOGD(tcm_hcd->pdev->dev.parent,
+				"Failed to get firmware image\n");
+		goto exit;
+	}
+
+	/* Lockdown implies a one-time (OTP-style) boot config write. */
+	if (input & BOOT_CFG_LOCKDOWN) {
+		retval = reflash_update_boot_config(true);
+		if (retval < 0) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+					"Failed to lockdown boot config\n");
+			goto exit;
+		}
+	} else if (input & BOOT_CFG_UPDATE) {
+		retval = reflash_update_boot_config(false);
+		if (retval < 0) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+					"Failed to update boot config\n");
+			goto exit;
+		}
+	}
+
+	/* A full reflash already rewrote the config areas below. */
+	if (input & REFLASH || input & FORCE_UPDATE) {
+		retval = count;
+		goto exit;
+	}
+
+	if (input & DISP_CFG_UPDATE) {
+		/* Only reset afterwards if no app config update follows. */
+		if (input & APP_CFG_UPDATE)
+			retval = reflash_update_disp_config(false);
+		else
+			retval = reflash_update_disp_config(true);
+		if (retval < 0) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to reflash display config\n");
+			goto exit;
+		}
+	}
+
+	if (input & APP_CFG_UPDATE) {
+		retval = reflash_update_app_config(true);
+		if (retval < 0) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to reflash application config\n");
+			goto exit;
+		}
+	}
+
+	retval = count;
+
+exit:
+	/* Drop any loader-provided image and reset one-shot state. */
+	if (reflash_hcd->fw_entry) {
+		release_firmware(reflash_hcd->fw_entry);
+		reflash_hcd->fw_entry = NULL;
+	}
+
+	reflash_hcd->image = NULL;
+	reflash_hcd->image_size = 0;
+	reflash_hcd->force_update = FORCE_REFLASH;
+
+	mutex_unlock(&reflash_hcd->reflash_mutex);
+
+	pm_relax(&tcm_hcd->pdev->dev);
+
+	mutex_unlock(&tcm_hcd->extif_mutex);
+
+	return retval;
+}
+
+/*
+ * Sysfs "image" store handler: accumulates a firmware image uploaded in
+ * chunks into image_buf and tracks the running size in image_size.
+ * Returns 'count' on success or a negative errno.
+ */
+static ssize_t reflash_sysfs_image_store(struct file *data_file,
+		struct kobject *kobj, struct bin_attribute *attributes,
+		char *buf, loff_t pos, size_t count)
+{
+	int retval;
+	struct syna_tcm_hcd *tcm_hcd = reflash_hcd->tcm_hcd;
+
+	mutex_lock(&tcm_hcd->extif_mutex);
+
+	/* Bound-check the chunk before copying: without this, a pos at or
+	 * beyond IMAGE_BUF_SIZE makes IMAGE_BUF_SIZE - pos underflow and
+	 * defeats the destination-size check in secure_memcpy(), allowing
+	 * an out of bound write into image_buf.
+	 */
+	if (pos < 0 || pos >= IMAGE_BUF_SIZE ||
+			count > IMAGE_BUF_SIZE - pos) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Invalid image data offset or length\n");
+		reflash_hcd->image_size = 0;
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	retval = secure_memcpy(&reflash_hcd->image_buf[pos],
+			IMAGE_BUF_SIZE - pos,
+			buf,
+			count,
+			count);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to copy firmware image data\n");
+		reflash_hcd->image_size = 0;
+		goto exit;
+	}
+
+	reflash_hcd->image_size = pos + count;
+
+	retval = count;
+
+exit:
+	mutex_unlock(&tcm_hcd->extif_mutex);
+
+	return retval;
+}
+
+/*
+ * Sysfs "lockdown" read handler: reads the CUSTOM_OTP flash area into the
+ * module's read buffer (restarting the application firmware afterwards)
+ * and logs it via reflash_show_data(). Returns 0 or a negative errno.
+ *
+ * NOTE(review): the data is not copied into 'buf'; presumably
+ * reflash_show_data() handles presentation — confirm against that helper.
+ */
+static ssize_t reflash_sysfs_lockdown_show(struct file *data_file,
+		struct kobject *kobj, struct bin_attribute *attributes,
+		char *buf, loff_t pos, size_t count)
+{
+	int retval;
+	struct syna_tcm_hcd *tcm_hcd = reflash_hcd->tcm_hcd;
+
+	mutex_lock(&tcm_hcd->extif_mutex);
+
+	mutex_lock(&reflash_hcd->reflash_mutex);
+
+	retval = reflash_read_data(CUSTOM_OTP, true, NULL);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to read lockdown data\n");
+		goto exit;
+	}
+
+	reflash_show_data();
+
+exit:
+	mutex_unlock(&reflash_hcd->reflash_mutex);
+
+	mutex_unlock(&tcm_hcd->extif_mutex);
+
+	return retval;
+}
+
+/*
+ * Sysfs "lockdown" store handler: writes 'count' bytes at 'pos' into the
+ * CUSTOM_OTP flash area. Holds a wakeup source for the flash operation.
+ * Returns 'count' on success or a negative errno.
+ */
+static ssize_t reflash_sysfs_lockdown_store(struct file *data_file,
+		struct kobject *kobj, struct bin_attribute *attributes,
+		char *buf, loff_t pos, size_t count)
+{
+	int retval;
+	struct syna_tcm_hcd *tcm_hcd = reflash_hcd->tcm_hcd;
+
+	mutex_lock(&tcm_hcd->extif_mutex);
+
+	pm_stay_awake(&tcm_hcd->pdev->dev);
+
+	mutex_lock(&reflash_hcd->reflash_mutex);
+
+	retval = reflash_update_custom_otp(buf, pos, count);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to update custom OTP data\n");
+		goto exit;
+	}
+
+	retval = count;
+
+exit:
+	mutex_unlock(&reflash_hcd->reflash_mutex);
+
+	pm_relax(&tcm_hcd->pdev->dev);
+
+	mutex_unlock(&tcm_hcd->extif_mutex);
+
+	return retval;
+}
+
+/*
+ * Sysfs "lcm" read handler: reads the CUSTOM_LCM flash area into the
+ * module's read buffer (restarting the application firmware afterwards)
+ * and logs it via reflash_show_data(). Returns 0 or a negative errno.
+ */
+static ssize_t reflash_sysfs_lcm_show(struct file *data_file,
+		struct kobject *kobj, struct bin_attribute *attributes,
+		char *buf, loff_t pos, size_t count)
+{
+	int retval;
+	struct syna_tcm_hcd *tcm_hcd = reflash_hcd->tcm_hcd;
+
+	mutex_lock(&tcm_hcd->extif_mutex);
+
+	mutex_lock(&reflash_hcd->reflash_mutex);
+
+	retval = reflash_read_data(CUSTOM_LCM, true, NULL);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to read LCM data\n");
+		goto exit;
+	}
+
+	reflash_show_data();
+
+exit:
+	mutex_unlock(&reflash_hcd->reflash_mutex);
+
+	mutex_unlock(&tcm_hcd->extif_mutex);
+
+	return retval;
+}
+
+/*
+ * Sysfs "lcm" store handler: writes 'count' bytes at 'pos' into the
+ * CUSTOM_LCM flash area. Holds a wakeup source for the flash operation.
+ * Returns 'count' on success or a negative errno.
+ */
+static ssize_t reflash_sysfs_lcm_store(struct file *data_file,
+		struct kobject *kobj, struct bin_attribute *attributes,
+		char *buf, loff_t pos, size_t count)
+{
+	int retval;
+	struct syna_tcm_hcd *tcm_hcd = reflash_hcd->tcm_hcd;
+
+	mutex_lock(&tcm_hcd->extif_mutex);
+
+	pm_stay_awake(&tcm_hcd->pdev->dev);
+
+	mutex_lock(&reflash_hcd->reflash_mutex);
+
+	retval = reflash_update_custom_lcm(buf, pos, count);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to update custom LCM data\n");
+		goto exit;
+	}
+
+	retval = count;
+
+exit:
+	mutex_unlock(&reflash_hcd->reflash_mutex);
+
+	pm_relax(&tcm_hcd->pdev->dev);
+
+	mutex_unlock(&tcm_hcd->extif_mutex);
+
+	return retval;
+}
+
+/*
+ * Sysfs "oem" read handler: reads the CUSTOM_OEM flash area into the
+ * module's read buffer (restarting the application firmware afterwards)
+ * and logs it via reflash_show_data(). Returns 0 or a negative errno.
+ */
+static ssize_t reflash_sysfs_oem_show(struct file *data_file,
+		struct kobject *kobj, struct bin_attribute *attributes,
+		char *buf, loff_t pos, size_t count)
+{
+	int retval;
+	struct syna_tcm_hcd *tcm_hcd = reflash_hcd->tcm_hcd;
+
+	mutex_lock(&tcm_hcd->extif_mutex);
+
+	mutex_lock(&reflash_hcd->reflash_mutex);
+
+	retval = reflash_read_data(CUSTOM_OEM, true, NULL);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to read OEM data\n");
+		goto exit;
+	}
+
+	reflash_show_data();
+
+exit:
+	mutex_unlock(&reflash_hcd->reflash_mutex);
+
+	mutex_unlock(&tcm_hcd->extif_mutex);
+
+	return retval;
+}
+
+/*
+ * Sysfs "oem" store handler: writes 'count' bytes at 'pos' into the
+ * CUSTOM_OEM flash area. Holds a wakeup source for the flash operation.
+ * Returns 'count' on success or a negative errno.
+ */
+static ssize_t reflash_sysfs_oem_store(struct file *data_file,
+		struct kobject *kobj, struct bin_attribute *attributes,
+		char *buf, loff_t pos, size_t count)
+{
+	int retval;
+	struct syna_tcm_hcd *tcm_hcd = reflash_hcd->tcm_hcd;
+
+	mutex_lock(&tcm_hcd->extif_mutex);
+
+	pm_stay_awake(&tcm_hcd->pdev->dev);
+
+	mutex_lock(&reflash_hcd->reflash_mutex);
+
+	retval = reflash_update_custom_oem(buf, pos, count);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to update custom OEM data\n");
+		goto exit;
+	}
+
+	retval = count;
+
+exit:
+	mutex_unlock(&reflash_hcd->reflash_mutex);
+
+	pm_relax(&tcm_hcd->pdev->dev);
+
+	mutex_unlock(&tcm_hcd->extif_mutex);
+
+	return retval;
+}
+
+/*
+ * Switch the controller into bootloader mode and cache the flash geometry
+ * (write block size, erase page size, max write payload) reported in the
+ * boot info. Returns 0 on success or a negative errno.
+ */
+static int reflash_set_up_flash_access(void)
+{
+	int retval;
+	unsigned int temp;
+	struct syna_tcm_hcd *tcm_hcd = reflash_hcd->tcm_hcd;
+
+	retval = tcm_hcd->identify(tcm_hcd, true);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to do identification\n");
+		return retval;
+	}
+
+	if (tcm_hcd->id_info.mode == MODE_APPLICATION) {
+		retval = tcm_hcd->switch_mode(tcm_hcd, FW_MODE_BOOTLOADER);
+		if (retval < 0) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+					"Failed to enter bootloader mode\n");
+			return retval;
+		}
+	}
+
+	/* NOTE(review): read directly, unlike the le2_to_uint() conversions
+	 * below — confirm write_block_size_words is a plain integer field.
+	 */
+	temp = tcm_hcd->boot_info.write_block_size_words;
+	reflash_hcd->write_block_size = temp * 2;
+
+	/* Sizes in boot info are in 16-bit words; convert to bytes. */
+	temp = le2_to_uint(tcm_hcd->boot_info.erase_page_size_words);
+	reflash_hcd->page_size = temp * 2;
+
+	temp = le2_to_uint(tcm_hcd->boot_info.max_write_payload_size);
+	reflash_hcd->max_write_payload_size = temp;
+
+	LOGD(tcm_hcd->pdev->dev.parent,
+			"Write block size = %d\n",
+			reflash_hcd->write_block_size);
+
+	LOGD(tcm_hcd->pdev->dev.parent,
+			"Page size = %d\n",
+			reflash_hcd->page_size);
+
+	LOGD(tcm_hcd->pdev->dev.parent,
+			"Max write payload size = %d\n",
+			reflash_hcd->max_write_payload_size);
+
+	/* A write command needs 5 header bytes per chunk; one block must fit. */
+	if (reflash_hcd->write_block_size > (tcm_hcd->wr_chunk_size - 5)) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+			"Write size greater than available chunk space\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * Parse the firmware image and record the location, size and flash address
+ * of each recognized area (boot config, app firmware, production test
+ * firmware, app config, display config) in reflash_hcd->image_info.
+ *
+ * The image is untrusted userspace/loader input: every offset and length
+ * taken from it is bounds-checked against the image size before use to
+ * avoid out of bound reads. Each area's CRC32 is verified.
+ * Returns 0 on success or -EINVAL on a malformed image.
+ */
+static int reflash_parse_fw_image(void)
+{
+	unsigned int idx;
+	unsigned int addr;
+	unsigned int offset;
+	unsigned int length;
+	unsigned int checksum;
+	unsigned int flash_addr;
+	unsigned int magic_value;
+	unsigned int num_of_areas;
+	unsigned int image_size;
+	struct image_header *header;
+	struct image_info *image_info;
+	struct area_descriptor *descriptor;
+	struct syna_tcm_hcd *tcm_hcd = reflash_hcd->tcm_hcd;
+	const unsigned char *image;
+	const unsigned char *content;
+
+	image = reflash_hcd->image;
+	image_size = reflash_hcd->image_size;
+	image_info = &reflash_hcd->image_info;
+
+	/* The image must at least hold the file header. */
+	if (image == NULL || image_size < sizeof(*header)) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Invalid image file size\n");
+		return -EINVAL;
+	}
+
+	header = (struct image_header *)image;
+
+	reflash_hcd->disp_cfg_update = false;
+
+	magic_value = le4_to_uint(header->magic_value);
+	if (magic_value != IMAGE_FILE_MAGIC_VALUE) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Invalid image file magic value\n");
+		return -EINVAL;
+	}
+
+	memset(image_info, 0x00, sizeof(*image_info));
+
+	offset = sizeof(*header);
+	num_of_areas = le4_to_uint(header->num_of_areas);
+
+	for (idx = 0; idx < num_of_areas; idx++) {
+		/* The 4-byte area pointer must lie inside the image. */
+		if (offset > image_size - 4) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+					"Invalid image file format\n");
+			return -EINVAL;
+		}
+		addr = le4_to_uint(image + offset);
+		offset += 4;
+
+		/* The area descriptor must lie entirely inside the image. */
+		if (addr > image_size ||
+				image_size - addr < sizeof(*descriptor)) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+					"Invalid image file format\n");
+			return -EINVAL;
+		}
+		descriptor = (struct area_descriptor *)(image + addr);
+
+		magic_value = le4_to_uint(descriptor->magic_value);
+		if (magic_value != FLASH_AREA_MAGIC_VALUE)
+			continue;
+
+		length = le4_to_uint(descriptor->length);
+		/* The area payload must not run past the end of the image. */
+		if (length > image_size - addr - sizeof(*descriptor)) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+					"Invalid image file format\n");
+			return -EINVAL;
+		}
+		content = (unsigned char *)descriptor + sizeof(*descriptor);
+		flash_addr = le4_to_uint(descriptor->flash_addr_words) * 2;
+		checksum = le4_to_uint(descriptor->checksum);
+
+		if (!memcmp((char *)descriptor->id_string,
+				BOOT_CONFIG_ID,
+				strlen(BOOT_CONFIG_ID))) {
+			if (checksum != (crc32(~0, content, length) ^ ~0)) {
+				LOGE(tcm_hcd->pdev->dev.parent,
+						"Boot config checksum error\n");
+				return -EINVAL;
+			}
+			image_info->boot_config.size = length;
+			image_info->boot_config.data = content;
+			image_info->boot_config.flash_addr = flash_addr;
+			LOGD(tcm_hcd->pdev->dev.parent,
+					"Boot config size = %d\n",
+					length);
+			LOGD(tcm_hcd->pdev->dev.parent,
+					"Boot config flash address = 0x%08x\n",
+					flash_addr);
+		} else if (!memcmp((char *)descriptor->id_string,
+				APP_CODE_ID,
+				strlen(APP_CODE_ID))) {
+			if (checksum != (crc32(~0, content, length) ^ ~0)) {
+				LOGE(tcm_hcd->pdev->dev.parent,
+					"APP firmware checksum error\n");
+				return -EINVAL;
+			}
+			image_info->app_firmware.size = length;
+			image_info->app_firmware.data = content;
+			image_info->app_firmware.flash_addr = flash_addr;
+			LOGD(tcm_hcd->pdev->dev.parent,
+				"Application firmware size = %d\n",
+				length);
+			LOGD(tcm_hcd->pdev->dev.parent,
+				"Application firmware flash address = 0x%08x\n",
+				flash_addr);
+		} else if (!memcmp((char *)descriptor->id_string,
+				PROD_TEST_ID,
+				strlen(PROD_TEST_ID))) {
+			if (checksum != (crc32(~0, content, length) ^ ~0)) {
+				LOGE(tcm_hcd->pdev->dev.parent,
+					"Production test checksum error\n");
+				return -EINVAL;
+			}
+			image_info->prod_test_firmware.size = length;
+			image_info->prod_test_firmware.data = content;
+			image_info->prod_test_firmware.flash_addr = flash_addr;
+			LOGD(tcm_hcd->pdev->dev.parent,
+				"Production test firmware size = %d\n",
+				length);
+			LOGD(tcm_hcd->pdev->dev.parent,
+				"Production test flash address = 0x%08x\n",
+				flash_addr);
+		} else if (!memcmp((char *)descriptor->id_string,
+				APP_CONFIG_ID,
+				strlen(APP_CONFIG_ID))) {
+			if (checksum != (crc32(~0, content, length) ^ ~0)) {
+				LOGE(tcm_hcd->pdev->dev.parent,
+					"Application config checksum error\n");
+				return -EINVAL;
+			}
+			image_info->app_config.size = length;
+			image_info->app_config.data = content;
+			image_info->app_config.flash_addr = flash_addr;
+			LOGD(tcm_hcd->pdev->dev.parent,
+				"Application config size = %d\n",
+				length);
+			LOGD(tcm_hcd->pdev->dev.parent,
+				"Application config flash address = 0x%08x\n",
+				flash_addr);
+		} else if (!memcmp((char *)descriptor->id_string,
+				DISP_CONFIG_ID,
+				strlen(DISP_CONFIG_ID))) {
+			if (checksum != (crc32(~0, content, length) ^ ~0)) {
+				LOGE(tcm_hcd->pdev->dev.parent,
+					"Display config checksum error\n");
+				return -EINVAL;
+			}
+			reflash_hcd->disp_cfg_update = true;
+			image_info->disp_config.size = length;
+			image_info->disp_config.data = content;
+			image_info->disp_config.flash_addr = flash_addr;
+			LOGD(tcm_hcd->pdev->dev.parent,
+				"Display config size = %d\n",
+				length);
+			LOGD(tcm_hcd->pdev->dev.parent,
+				"Display config flash address = 0x%08x\n",
+				flash_addr);
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Obtain and parse a firmware image. If no image was uploaded via sysfs
+ * (reflash_hcd->image == NULL), request one from the firmware loader using
+ * the board-specific name or the default FW_IMAGE_NAME.
+ * Returns 0 on success or a negative errno.
+ */
+static int reflash_get_fw_image(void)
+{
+	int retval;
+	const char *fw_name;
+	struct syna_tcm_hcd *tcm_hcd = reflash_hcd->tcm_hcd;
+	const struct syna_tcm_board_data *bdata = tcm_hcd->hw_if->bdata;
+
+	if (bdata->fw_name)
+		fw_name = bdata->fw_name;
+	else
+		fw_name = FW_IMAGE_NAME;
+
+	if (reflash_hcd->image == NULL) {
+		retval = request_firmware(&reflash_hcd->fw_entry, fw_name,
+				tcm_hcd->pdev->dev.parent);
+		if (retval < 0) {
+			LOGD(tcm_hcd->pdev->dev.parent,
+					"Failed to request %s\n",
+					fw_name);
+			return retval;
+		}
+
+		LOGD(tcm_hcd->pdev->dev.parent,
+				"Firmware image size = %d\n",
+				(unsigned int)reflash_hcd->fw_entry->size);
+
+		reflash_hcd->image = reflash_hcd->fw_entry->data;
+		reflash_hcd->image_size = reflash_hcd->fw_entry->size;
+	}
+
+	retval = reflash_parse_fw_image();
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to parse firmware image\n");
+		return retval;
+	}
+
+	return 0;
+}
+
+/*
+ * Decide which areas need updating by comparing the image's build ID and
+ * customer config ID against the device's. Returns FIRMWARE_CONFIG when a
+ * forced update was requested, the device is not running application
+ * firmware, or the image build ID is newer; CONFIG_ONLY when only the
+ * config ID is newer; NONE otherwise.
+ */
+static enum update_area reflash_compare_id_info(void)
+{
+	enum update_area update_area;
+	unsigned int idx;
+	unsigned int image_fw_id;
+	unsigned int device_fw_id;
+	unsigned char *image_config_id;
+	unsigned char *device_config_id;
+	struct app_config_header *header;
+	struct syna_tcm_hcd *tcm_hcd = reflash_hcd->tcm_hcd;
+	const unsigned char *app_config_data;
+
+	update_area = NONE;
+
+	if (reflash_hcd->image_info.app_config.size < sizeof(*header)) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Invalid application config in image file\n");
+		goto exit;
+	}
+
+	app_config_data = reflash_hcd->image_info.app_config.data;
+	header = (struct app_config_header *)app_config_data;
+
+	if (reflash_hcd->force_update) {
+		update_area = FIRMWARE_CONFIG;
+		goto exit;
+	}
+
+	if (tcm_hcd->id_info.mode != MODE_APPLICATION) {
+		update_area = FIRMWARE_CONFIG;
+		goto exit;
+	}
+
+	image_fw_id = le4_to_uint(header->build_id);
+	device_fw_id = tcm_hcd->packrat_number;
+
+	if (image_fw_id > device_fw_id) {
+		LOGN(tcm_hcd->pdev->dev.parent,
+			"Image firmware ID newer than device firmware ID\n");
+		update_area = FIRMWARE_CONFIG;
+		goto exit;
+	} else if (image_fw_id < device_fw_id) {
+		LOGN(tcm_hcd->pdev->dev.parent,
+			"Image firmware ID older than device firmware ID\n");
+		update_area = NONE;
+		goto exit;
+	}
+
+	image_config_id = header->customer_config_id;
+	device_config_id = tcm_hcd->app_info.customer_config_id;
+
+	/* Lexicographic, most-significant-byte-first comparison.
+	 * NOTE(review): assumes customer_config_id is 16 bytes — confirm
+	 * against the struct definitions.
+	 */
+	for (idx = 0; idx < 16; idx++) {
+		if (image_config_id[idx] > device_config_id[idx]) {
+			LOGN(tcm_hcd->pdev->dev.parent,
+				"Image config ID newer than device's ID\n");
+			update_area = CONFIG_ONLY;
+			goto exit;
+		} else if (image_config_id[idx] < device_config_id[idx]) {
+			LOGN(tcm_hcd->pdev->dev.parent,
+				"Image config ID older than device's ID\n");
+			update_area = NONE;
+			goto exit;
+		}
+	}
+
+	update_area = NONE;
+
+exit:
+	if (update_area == NONE)
+		LOGD(tcm_hcd->pdev->dev.parent, "No need to do reflash\n");
+	else
+		LOGD(tcm_hcd->pdev->dev.parent,
+				"Updating %s\n",
+				update_area == FIRMWARE_CONFIG ?
+				"firmware and config" :
+				"config only");
+
+	return update_area;
+}
+
+/*
+ * Read 'datalen' bytes from flash at byte address 'address' into 'data'
+ * using CMD_READ_FLASH. The command payload carries the address and length
+ * in 16-bit words, little-endian. Caller must already be in bootloader
+ * mode. Returns 0 on success or a negative errno.
+ */
+static int reflash_read_flash(unsigned int address, unsigned char *data,
+		unsigned int datalen)
+{
+	int retval;
+	unsigned int length_words;
+	unsigned int flash_addr_words;
+	struct syna_tcm_hcd *tcm_hcd = reflash_hcd->tcm_hcd;
+
+	LOCK_BUFFER(reflash_hcd->out);
+
+	/* 4 address bytes + 2 length bytes. */
+	retval = syna_tcm_alloc_mem(tcm_hcd,
+			&reflash_hcd->out,
+			6);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+			"Failed to allocate memory for reflash_hcd->out.buf\n");
+		UNLOCK_BUFFER(reflash_hcd->out);
+		return retval;
+	}
+
+	length_words = datalen / 2;
+	flash_addr_words = address / 2;
+
+	reflash_hcd->out.buf[0] = (unsigned char)flash_addr_words;
+	reflash_hcd->out.buf[1] = (unsigned char)(flash_addr_words >> 8);
+	reflash_hcd->out.buf[2] = (unsigned char)(flash_addr_words >> 16);
+	reflash_hcd->out.buf[3] = (unsigned char)(flash_addr_words >> 24);
+	reflash_hcd->out.buf[4] = (unsigned char)length_words;
+	reflash_hcd->out.buf[5] = (unsigned char)(length_words >> 8);
+
+	LOCK_BUFFER(reflash_hcd->resp);
+
+	retval = tcm_hcd->write_message(tcm_hcd,
+			CMD_READ_FLASH,
+			reflash_hcd->out.buf,
+			6,
+			&reflash_hcd->resp.buf,
+			&reflash_hcd->resp.buf_size,
+			&reflash_hcd->resp.data_length,
+			NULL,
+			0);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to write command %s\n",
+				STR(CMD_READ_FLASH));
+		UNLOCK_BUFFER(reflash_hcd->resp);
+		UNLOCK_BUFFER(reflash_hcd->out);
+		return retval;
+	}
+
+	UNLOCK_BUFFER(reflash_hcd->out);
+
+	/* A short response means the read did not cover the request. */
+	if (reflash_hcd->resp.data_length != datalen) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to read requested length\n");
+		UNLOCK_BUFFER(reflash_hcd->resp);
+		return -EIO;
+	}
+
+	retval = secure_memcpy(data,
+			datalen,
+			reflash_hcd->resp.buf,
+			reflash_hcd->resp.buf_size,
+			datalen);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to copy read data\n");
+		UNLOCK_BUFFER(reflash_hcd->resp);
+		return retval;
+	}
+
+	UNLOCK_BUFFER(reflash_hcd->resp);
+
+	return 0;
+}
+
+/*
+ * Read an entire flash data area into reflash_hcd->read and, when 'output'
+ * is non-NULL, copy it there too. The area's address and length (in write
+ * blocks) come either from get_data_location() (LCM/OEM/PPDT) or from the
+ * boot/app info (boot config, app config, display config, OTP). When
+ * 'run_app_firmware' is true the controller is switched back to the
+ * application firmware before returning, even on failure after flash
+ * access was set up. Returns 0 on success or a negative errno.
+ */
+static int reflash_read_data(enum flash_area area, bool run_app_firmware,
+		struct syna_tcm_buffer *output)
+{
+	int retval;
+	unsigned int temp;
+	unsigned int addr;
+	unsigned int length;
+	struct syna_tcm_app_info *app_info;
+	struct syna_tcm_boot_info *boot_info;
+	struct syna_tcm_hcd *tcm_hcd = reflash_hcd->tcm_hcd;
+
+	/* Query location first: it must happen before leaving app mode. */
+	switch (area) {
+	case CUSTOM_LCM:
+	case CUSTOM_OEM:
+	case PPDT:
+		retval = tcm_hcd->get_data_location(tcm_hcd,
+				area,
+				&addr,
+				&length);
+		if (retval < 0) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+					"Failed to get data location\n");
+			return retval;
+		}
+		break;
+	default:
+		break;
+	}
+
+	retval = reflash_set_up_flash_access();
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to set up flash access\n");
+		return retval;
+	}
+
+	app_info = &tcm_hcd->app_info;
+	boot_info = &tcm_hcd->boot_info;
+
+	/* Translate block counts from the info structures into bytes. */
+	switch (area) {
+	case BOOT_CONFIG:
+		temp = le2_to_uint(boot_info->boot_config_start_block);
+		addr = temp * reflash_hcd->write_block_size;
+		length = BOOT_CONFIG_SIZE * BOOT_CONFIG_SLOTS;
+		break;
+	case APP_CONFIG:
+		temp = le2_to_uint(app_info->app_config_start_write_block);
+		addr = temp * reflash_hcd->write_block_size;
+		length = le2_to_uint(app_info->app_config_size);
+		break;
+	case DISP_CONFIG:
+		temp = le4_to_uint(boot_info->display_config_start_block);
+		addr = temp * reflash_hcd->write_block_size;
+		temp = le2_to_uint(boot_info->display_config_length_blocks);
+		length = temp * reflash_hcd->write_block_size;
+		break;
+	case CUSTOM_OTP:
+		temp = le2_to_uint(boot_info->custom_otp_start_block);
+		addr = temp * reflash_hcd->write_block_size;
+		temp = le2_to_uint(boot_info->custom_otp_length_blocks);
+		length = temp * reflash_hcd->write_block_size;
+		break;
+	case CUSTOM_LCM:
+	case CUSTOM_OEM:
+	case PPDT:
+		addr *= reflash_hcd->write_block_size;
+		length *= reflash_hcd->write_block_size;
+		break;
+	default:
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Invalid data area\n");
+		retval = -EINVAL;
+		goto run_app_firmware;
+	}
+
+	if (addr == 0 || length == 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Data area unavailable\n");
+		retval = -EINVAL;
+		goto run_app_firmware;
+	}
+
+	LOCK_BUFFER(reflash_hcd->read);
+
+	retval = syna_tcm_alloc_mem(tcm_hcd,
+			&reflash_hcd->read,
+			length);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+			"Failed to allocate memory for read.buf\n");
+		UNLOCK_BUFFER(reflash_hcd->read);
+		goto run_app_firmware;
+	}
+
+	retval = reflash_read_flash(addr, reflash_hcd->read.buf, length);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to read from flash\n");
+		UNLOCK_BUFFER(reflash_hcd->read);
+		goto run_app_firmware;
+	}
+
+	reflash_hcd->read.data_length = length;
+
+	if (output != NULL) {
+		retval = syna_tcm_alloc_mem(tcm_hcd,
+				output,
+				length);
+		if (retval < 0) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to allocate memory for output->buf\n");
+			UNLOCK_BUFFER(reflash_hcd->read);
+			goto run_app_firmware;
+		}
+
+		retval = secure_memcpy(output->buf,
+				output->buf_size,
+				reflash_hcd->read.buf,
+				reflash_hcd->read.buf_size,
+				length);
+		if (retval < 0) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+					"Failed to copy read data\n");
+			UNLOCK_BUFFER(reflash_hcd->read);
+			goto run_app_firmware;
+		}
+
+		output->data_length = length;
+	}
+
+	UNLOCK_BUFFER(reflash_hcd->read);
+
+	retval = 0;
+
+run_app_firmware:
+	if (!run_app_firmware)
+		goto exit;
+
+	/* Best effort; a failure here does not override retval. */
+	if (tcm_hcd->switch_mode(tcm_hcd, FW_MODE_APPLICATION) < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to run application firmware\n");
+	}
+
+exit:
+	return retval;
+}
+
+/*
+ * Sanity-check the boot config area carried in the firmware image against
+ * the device: the area must hold at least one slot and its flash address
+ * must match the device's boot config start. Returns 0 or -EINVAL.
+ */
+static int reflash_check_boot_config(void)
+{
+	unsigned int start_block;
+	unsigned int device_addr;
+	unsigned int image_addr;
+	struct syna_tcm_hcd *tcm_hcd = reflash_hcd->tcm_hcd;
+
+	if (reflash_hcd->image_info.boot_config.size < BOOT_CONFIG_SIZE) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"No valid boot config in image file\n");
+		return -EINVAL;
+	}
+
+	start_block = le2_to_uint(tcm_hcd->boot_info.boot_config_start_block);
+	device_addr = start_block * reflash_hcd->write_block_size;
+	image_addr = reflash_hcd->image_info.boot_config.flash_addr;
+
+	if (device_addr != image_addr) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Flash address mismatch\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * Sanity-check the application config area in the firmware image against
+ * the device. If the device reports no app config location (address and
+ * size both zero), the check is skipped; otherwise flash address and size
+ * must match exactly. Returns 0 or -EINVAL.
+ */
+static int reflash_check_app_config(void)
+{
+	unsigned int start_block;
+	unsigned int device_addr;
+	unsigned int device_size;
+	unsigned int image_addr;
+	unsigned int image_size;
+	struct syna_tcm_hcd *tcm_hcd = reflash_hcd->tcm_hcd;
+
+	image_size = reflash_hcd->image_info.app_config.size;
+	if (image_size == 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"No application config in image file\n");
+		return -EINVAL;
+	}
+
+	image_addr = reflash_hcd->image_info.app_config.flash_addr;
+
+	start_block =
+		le2_to_uint(tcm_hcd->app_info.app_config_start_write_block);
+	device_addr = start_block * reflash_hcd->write_block_size;
+	device_size = le2_to_uint(tcm_hcd->app_info.app_config_size);
+
+	if (device_addr == 0 && device_size == 0)
+		return 0;
+
+	if (device_addr != image_addr) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Flash address mismatch\n");
+		return -EINVAL;
+	}
+
+	if (device_size != image_size) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Config size mismatch\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * Sanity-check the display config area in the firmware image against the
+ * device: the area must be present and its flash address and size must
+ * match the device's display config region. Returns 0 or -EINVAL.
+ */
+static int reflash_check_disp_config(void)
+{
+	unsigned int blocks;
+	unsigned int device_addr;
+	unsigned int device_size;
+	unsigned int image_addr;
+	unsigned int image_size;
+	struct syna_tcm_hcd *tcm_hcd = reflash_hcd->tcm_hcd;
+
+	image_size = reflash_hcd->image_info.disp_config.size;
+	if (image_size == 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"No display config in image file\n");
+		return -EINVAL;
+	}
+
+	image_addr = reflash_hcd->image_info.disp_config.flash_addr;
+
+	blocks = le4_to_uint(tcm_hcd->boot_info.display_config_start_block);
+	device_addr = blocks * reflash_hcd->write_block_size;
+
+	blocks = le2_to_uint(tcm_hcd->boot_info.display_config_length_blocks);
+	device_size = blocks * reflash_hcd->write_block_size;
+
+	if (device_addr != image_addr) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Flash address mismatch\n");
+		return -EINVAL;
+	}
+
+	if (device_size != image_size) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Config size mismatch\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * Verify the firmware image carries a production test firmware area.
+ * Returns 0 when present, -EINVAL otherwise.
+ */
+static int reflash_check_prod_test_firmware(void)
+{
+	struct syna_tcm_hcd *tcm_hcd = reflash_hcd->tcm_hcd;
+
+	/* A nonzero size means the area was found during parsing. */
+	if (reflash_hcd->image_info.prod_test_firmware.size != 0)
+		return 0;
+
+	LOGE(tcm_hcd->pdev->dev.parent,
+			"No production test firmware in image file\n");
+	return -EINVAL;
+}
+
+/*
+ * Verify the firmware image carries an application firmware area.
+ * Returns 0 when present, -EINVAL otherwise.
+ */
+static int reflash_check_app_firmware(void)
+{
+	struct syna_tcm_hcd *tcm_hcd = reflash_hcd->tcm_hcd;
+
+	/* A nonzero size means the area was found during parsing. */
+	if (reflash_hcd->image_info.app_firmware.size != 0)
+		return 0;
+
+	LOGE(tcm_hcd->pdev->dev.parent,
+			"No application firmware in image file\n");
+	return -EINVAL;
+}
+
+/*
+ * Write 'datalen' bytes from 'data' to flash starting at byte address
+ * 'address' using CMD_WRITE_FLASH, chunked to fit the transport chunk
+ * size, the write block alignment and the controller's maximum write
+ * payload. Caller must be in bootloader mode and 'address' should be
+ * write-block aligned. Returns 0 on success or a negative errno.
+ */
+static int reflash_write_flash(unsigned int address, const unsigned char *data,
+		unsigned int datalen)
+{
+	int retval;
+	unsigned int offset;
+	unsigned int w_length;
+	unsigned int xfer_length;
+	unsigned int remaining_length;
+	unsigned int flash_address;
+	unsigned int block_address;
+	struct syna_tcm_hcd *tcm_hcd = reflash_hcd->tcm_hcd;
+
+	/* Per-chunk payload: chunk size minus 5 header bytes, rounded down
+	 * to a multiple of the write block size, capped by the controller.
+	 */
+	w_length = tcm_hcd->wr_chunk_size - 5;
+
+	w_length = w_length - (w_length % reflash_hcd->write_block_size);
+
+	w_length = MIN(w_length, reflash_hcd->max_write_payload_size);
+
+	offset = 0;
+
+	remaining_length = datalen;
+
+	LOCK_BUFFER(reflash_hcd->out);
+	LOCK_BUFFER(reflash_hcd->resp);
+
+	while (remaining_length) {
+		if (remaining_length > w_length)
+			xfer_length = w_length;
+		else
+			xfer_length = remaining_length;
+
+		/* +2 for the little-endian block address prefix. */
+		retval = syna_tcm_alloc_mem(tcm_hcd,
+				&reflash_hcd->out,
+				xfer_length + 2);
+		if (retval < 0) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to allocate memory for out.buf\n");
+			UNLOCK_BUFFER(reflash_hcd->resp);
+			UNLOCK_BUFFER(reflash_hcd->out);
+			return retval;
+		}
+
+		flash_address = address + offset;
+		block_address = flash_address / reflash_hcd->write_block_size;
+		reflash_hcd->out.buf[0] = (unsigned char)block_address;
+		reflash_hcd->out.buf[1] = (unsigned char)(block_address >> 8);
+
+		retval = secure_memcpy(&reflash_hcd->out.buf[2],
+				reflash_hcd->out.buf_size - 2,
+				&data[offset],
+				datalen - offset,
+				xfer_length);
+		if (retval < 0) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+					"Failed to copy write data\n");
+			UNLOCK_BUFFER(reflash_hcd->resp);
+			UNLOCK_BUFFER(reflash_hcd->out);
+			return retval;
+		}
+
+		retval = tcm_hcd->write_message(tcm_hcd,
+				CMD_WRITE_FLASH,
+				reflash_hcd->out.buf,
+				xfer_length + 2,
+				&reflash_hcd->resp.buf,
+				&reflash_hcd->resp.buf_size,
+				&reflash_hcd->resp.data_length,
+				NULL,
+				WRITE_FLASH_DELAY_MS);
+		if (retval < 0) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+					"Failed to write command %s\n",
+					STR(CMD_WRITE_FLASH));
+			LOGE(tcm_hcd->pdev->dev.parent,
+					"Flash address = 0x%08x\n",
+					flash_address);
+			LOGE(tcm_hcd->pdev->dev.parent,
+					"Data length = %d\n",
+					xfer_length);
+			UNLOCK_BUFFER(reflash_hcd->resp);
+			UNLOCK_BUFFER(reflash_hcd->out);
+			return retval;
+		}
+
+		offset += xfer_length;
+		remaining_length -= xfer_length;
+	}
+
+	UNLOCK_BUFFER(reflash_hcd->resp);
+	UNLOCK_BUFFER(reflash_hcd->out);
+
+	return 0;
+}
+
+/* Each line below expands the reflash_write() function-generating macro
+ * (presumably defined earlier in this file) into a per-area write helper,
+ * e.g. reflash_write_app_config().
+ */
+reflash_write(app_config)
+
+reflash_write(disp_config)
+
+reflash_write(prod_test_firmware)
+
+reflash_write(app_firmware)
+
+/*
+ * Erase 'page_count' flash pages starting at page 'page_start' using
+ * CMD_ERASE_FLASH. Returns 0 on success or a negative errno.
+ *
+ * NOTE(review): page_start and page_count are truncated to one byte each
+ * by the 2-byte command form — confirm the targeted parts never exceed
+ * 255 pages.
+ */
+static int reflash_erase_flash(unsigned int page_start, unsigned int page_count)
+{
+	int retval;
+	unsigned char out_buf[2];
+	struct syna_tcm_hcd *tcm_hcd = reflash_hcd->tcm_hcd;
+
+	out_buf[0] = (unsigned char)page_start;
+	out_buf[1] = (unsigned char)page_count;
+
+	LOCK_BUFFER(reflash_hcd->resp);
+
+	retval = tcm_hcd->write_message(tcm_hcd,
+			CMD_ERASE_FLASH,
+			out_buf,
+			sizeof(out_buf),
+			&reflash_hcd->resp.buf,
+			&reflash_hcd->resp.buf_size,
+			&reflash_hcd->resp.data_length,
+			NULL,
+			ERASE_FLASH_DELAY_MS);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to write command %s\n",
+				STR(CMD_ERASE_FLASH));
+		UNLOCK_BUFFER(reflash_hcd->resp);
+		return retval;
+	}
+
+	UNLOCK_BUFFER(reflash_hcd->resp);
+
+	return 0;
+}
+
+/* Each line below expands the reflash_erase() function-generating macro
+ * (presumably defined earlier in this file) into a per-area erase helper,
+ * e.g. reflash_erase_app_config().
+ */
+reflash_erase(app_config)
+
+reflash_erase(disp_config)
+
+reflash_erase(prod_test_firmware)
+
+reflash_erase(app_firmware)
+
+/*
+ * Write 'datalen' bytes at byte offset 'offset' into the custom OTP flash
+ * area. OTP is one-time programmable, so no erase is performed. The
+ * watchdog is paused and the application firmware restarted afterwards.
+ * Returns 0 on success or a negative errno.
+ */
+static int reflash_update_custom_otp(const unsigned char *data,
+		unsigned int offset, unsigned int datalen)
+{
+	int retval;
+	unsigned int temp;
+	unsigned int addr;
+	unsigned int length;
+	struct syna_tcm_hcd *tcm_hcd = reflash_hcd->tcm_hcd;
+
+	retval = reflash_set_up_flash_access();
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to set up flash access\n");
+		return retval;
+	}
+
+	tcm_hcd->update_watchdog(tcm_hcd, false);
+
+	temp = le2_to_uint(tcm_hcd->boot_info.custom_otp_start_block);
+	addr = temp * reflash_hcd->write_block_size;
+
+	temp = le2_to_uint(tcm_hcd->boot_info.custom_otp_length_blocks);
+	length = temp * reflash_hcd->write_block_size;
+
+	if (addr == 0 || length == 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Data area unavailable\n");
+		retval = -EINVAL;
+		goto run_app_firmware;
+	}
+
+	/* Overflow-safe form of "offset + datalen > length": the naive sum
+	 * can wrap around and bypass the check, allowing an out of bound
+	 * flash write.
+	 */
+	if (offset > length || datalen > length - offset) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Invalid data length\n");
+		retval = -EINVAL;
+		goto run_app_firmware;
+	}
+
+	retval = reflash_write_flash(addr + offset,
+			data,
+			datalen);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to write to flash\n");
+		goto run_app_firmware;
+	}
+
+	retval = 0;
+
+run_app_firmware:
+	if (tcm_hcd->switch_mode(tcm_hcd, FW_MODE_APPLICATION) < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to run application firmware\n");
+	}
+
+	tcm_hcd->update_watchdog(tcm_hcd, true);
+
+	return retval;
+}
+
+/*
+ * Write 'datalen' bytes at byte offset 'offset' into the custom LCM flash
+ * area. When the write starts at offset 0 the whole area is erased first.
+ * The watchdog is paused and the application firmware restarted
+ * afterwards. Returns 0 on success or a negative errno.
+ */
+static int reflash_update_custom_lcm(const unsigned char *data,
+		unsigned int offset, unsigned int datalen)
+{
+	int retval;
+	unsigned int addr;
+	unsigned int length;
+	unsigned int page_start;
+	unsigned int page_count;
+	struct syna_tcm_hcd *tcm_hcd = reflash_hcd->tcm_hcd;
+
+	retval = tcm_hcd->get_data_location(tcm_hcd,
+			CUSTOM_LCM,
+			&addr,
+			&length);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to get data location\n");
+		return retval;
+	}
+
+	retval = reflash_set_up_flash_access();
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to set up flash access\n");
+		return retval;
+	}
+
+	tcm_hcd->update_watchdog(tcm_hcd, false);
+
+	addr *= reflash_hcd->write_block_size;
+	length *= reflash_hcd->write_block_size;
+
+	if (addr == 0 || length == 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Data area unavailable\n");
+		retval = -EINVAL;
+		goto run_app_firmware;
+	}
+
+	/* Overflow-safe form of "offset + datalen > length": the naive sum
+	 * can wrap around and bypass the check, allowing an out of bound
+	 * flash write.
+	 */
+	if (offset > length || datalen > length - offset) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Invalid data length\n");
+		retval = -EINVAL;
+		goto run_app_firmware;
+	}
+
+	if (offset == 0) {
+		page_start = addr / reflash_hcd->page_size;
+
+		page_count = ceil_div(length, reflash_hcd->page_size);
+
+		retval = reflash_erase_flash(page_start, page_count);
+		if (retval < 0) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+					"Failed to erase flash pages\n");
+			goto run_app_firmware;
+		}
+	}
+
+	retval = reflash_write_flash(addr + offset,
+			data,
+			datalen);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to write to flash\n");
+		goto run_app_firmware;
+	}
+
+	retval = 0;
+
+run_app_firmware:
+	if (tcm_hcd->switch_mode(tcm_hcd, FW_MODE_APPLICATION) < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to run application firmware\n");
+	}
+
+	tcm_hcd->update_watchdog(tcm_hcd, true);
+
+	return retval;
+}
+
+/*
+ * Write 'datalen' bytes at byte offset 'offset' into the custom OEM flash
+ * area. When the write starts at offset 0 the whole area is erased first.
+ * The watchdog is paused and the application firmware restarted
+ * afterwards. Returns 0 on success or a negative errno.
+ */
+static int reflash_update_custom_oem(const unsigned char *data,
+		unsigned int offset, unsigned int datalen)
+{
+	int retval;
+	unsigned int addr;
+	unsigned int length;
+	unsigned int page_start;
+	unsigned int page_count;
+	struct syna_tcm_hcd *tcm_hcd = reflash_hcd->tcm_hcd;
+
+	retval = tcm_hcd->get_data_location(tcm_hcd,
+			CUSTOM_OEM,
+			&addr,
+			&length);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to get data location\n");
+		return retval;
+	}
+
+	retval = reflash_set_up_flash_access();
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to set up flash access\n");
+		return retval;
+	}
+
+	tcm_hcd->update_watchdog(tcm_hcd, false);
+
+	addr *= reflash_hcd->write_block_size;
+	length *= reflash_hcd->write_block_size;
+
+	if (addr == 0 || length == 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Data area unavailable\n");
+		retval = -EINVAL;
+		goto run_app_firmware;
+	}
+
+	/* Overflow-safe form of "offset + datalen > length": the naive sum
+	 * can wrap around and bypass the check, allowing an out of bound
+	 * flash write.
+	 */
+	if (offset > length || datalen > length - offset) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Invalid data length\n");
+		retval = -EINVAL;
+		goto run_app_firmware;
+	}
+
+	if (offset == 0) {
+		page_start = addr / reflash_hcd->page_size;
+
+		page_count = ceil_div(length, reflash_hcd->page_size);
+
+		retval = reflash_erase_flash(page_start, page_count);
+		if (retval < 0) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+					"Failed to erase flash pages\n");
+			goto run_app_firmware;
+		}
+	}
+
+	retval = reflash_write_flash(addr + offset,
+			data,
+			datalen);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to write to flash\n");
+		goto run_app_firmware;
+	}
+
+	retval = 0;
+
+run_app_firmware:
+	if (tcm_hcd->switch_mode(tcm_hcd, FW_MODE_APPLICATION) < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to run application firmware\n");
+	}
+
+	tcm_hcd->update_watchdog(tcm_hcd, true);
+
+	return retval;
+}
+
+/*
+ * Write a new boot config entry into the first free slot of the
+ * boot_config flash partition, or into the last slot when 'lock' is
+ * set to lock the boot config down. The device is reset on all exit
+ * paths after flash access has been set up.
+ *
+ * NOTE(review): 'addr' is read below ("addr += idx * BOOT_CONFIG_SIZE")
+ * without ever being assigned first, so the flash write address is
+ * indeterminate (undefined behavior). Presumably the boot_config
+ * partition base address should be fetched before that line -- confirm
+ * against the vendor reference source.
+ */
+static int reflash_update_boot_config(bool lock)
+{
+	int retval;
+	unsigned char slot_used;
+	unsigned int idx;
+	unsigned int addr;	/* write address; see uninitialized-use note above */
+	struct boot_config *data;
+	struct boot_config *last_slot;
+	struct syna_tcm_hcd *tcm_hcd = reflash_hcd->tcm_hcd;
+
+	retval = reflash_set_up_flash_access();
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to set up flash access\n");
+		return retval;
+	}
+
+	/* watchdog off while the bootloader owns the device */
+	tcm_hcd->update_watchdog(tcm_hcd, false);
+
+	retval = reflash_check_boot_config();
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed boot_config partition check\n");
+		goto reset;
+	}
+
+	/* read current boot config slots into reflash_hcd->read */
+	retval = reflash_read_data(BOOT_CONFIG, false, NULL);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to read boot config\n");
+		goto reset;
+	}
+
+	LOCK_BUFFER(reflash_hcd->read);
+
+	data = (struct boot_config *)reflash_hcd->read.buf;
+	last_slot = data + (BOOT_CONFIG_SLOTS - 1);
+	/* the "used" marker value depends on the current bootloader mode */
+	slot_used = tcm_hcd->id_info.mode == MODE_TDDI_BOOTLOADER ? 0 : 1;
+
+	if (last_slot->used == slot_used) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Boot config already locked down\n");
+		UNLOCK_BUFFER(reflash_hcd->read);
+		/*
+		 * NOTE(review): retval still holds the success value from
+		 * reflash_read_data() here, so this error path can return
+		 * 0 -- likely should set an error code before the goto.
+		 */
+		goto reset;
+	}
+
+	if (lock) {
+		/* lock-down always targets the final slot */
+		idx = BOOT_CONFIG_SLOTS - 1;
+	} else {
+		/* scan for the first slot not yet marked as used */
+		for (idx = 0; idx < BOOT_CONFIG_SLOTS; idx++) {
+			if (data->used == slot_used) {
+				data++;
+				continue;
+			} else {
+				break;
+			}
+		}
+	}
+
+	UNLOCK_BUFFER(reflash_hcd->read);
+
+	if (idx == BOOT_CONFIG_SLOTS) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"No free boot config slot available\n");
+		/* NOTE(review): same issue -- retval is not set to an error here */
+		goto reset;
+	}
+
+	/* NOTE(review): 'addr' has not been initialized at this point */
+	addr += idx * BOOT_CONFIG_SIZE;
+
+	retval = reflash_write_flash(addr,
+			reflash_hcd->image_info.boot_config.data,
+			BOOT_CONFIG_SIZE);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to write to flash\n");
+		goto reset;
+	}
+
+	LOGN(tcm_hcd->pdev->dev.parent,
+			"Slot %d updated with new boot config\n",
+			idx);
+
+	retval = 0;
+
+reset:
+	if (tcm_hcd->reset(tcm_hcd, false, true) < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to do reset\n");
+	}
+
+	tcm_hcd->update_watchdog(tcm_hcd, true);
+
+	return retval;
+}
+
+/*
+ * Instantiate reflash_update_app_config(), reflash_update_disp_config(),
+ * reflash_update_prod_test_firmware() and reflash_update_app_firmware()
+ * from the reflash_update() macro (presumably defined earlier in this
+ * file -- it is not visible in this hunk).
+ */
+reflash_update(app_config)
+
+reflash_update(disp_config)
+
+reflash_update(prod_test_firmware)
+
+reflash_update(app_firmware)
+
+/*
+ * Top-level reflash entry point: fetch the firmware image, compare its
+ * ID info against the device, and update whichever areas (firmware,
+ * display config, application config) are out of date. Always releases
+ * the firmware image and clears the firmware_flashing flag on exit.
+ */
+static int reflash_do_reflash(void)
+{
+	int retval;
+	enum update_area update_area;
+	struct syna_tcm_hcd *tcm_hcd = reflash_hcd->tcm_hcd;
+
+	retval = reflash_get_fw_image();
+	if (retval < 0) {
+		LOGD(tcm_hcd->pdev->dev.parent,
+				"Failed to get firmware image\n");
+		goto exit;
+	}
+
+	LOGD(tcm_hcd->pdev->dev.parent,
+			"Start of reflash\n");
+
+	/* let waiters (reflash_wq) know a flash operation is in progress */
+	atomic_set(&tcm_hcd->firmware_flashing, 1);
+
+	update_area = reflash_compare_id_info();
+
+	switch (update_area) {
+	case FIRMWARE_CONFIG:
+		retval = reflash_update_app_firmware(false);
+		if (retval < 0) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to reflash application firmware\n");
+			goto exit;
+		}
+		memset(&tcm_hcd->app_info, 0x00, sizeof(tcm_hcd->app_info));
+		if (tcm_hcd->features.dual_firmware) {
+			retval = reflash_update_prod_test_firmware(false);
+			if (retval < 0) {
+				LOGE(tcm_hcd->pdev->dev.parent,
+					"Failed to reflash production test\n");
+				goto exit;
+			}
+		}
+		/* fall through: a firmware update also requires a config update */
+	case CONFIG_ONLY:
+		if (reflash_hcd->disp_cfg_update) {
+			retval = reflash_update_disp_config(false);
+			if (retval < 0) {
+				LOGE(tcm_hcd->pdev->dev.parent,
+					"Failed to reflash display config\n");
+				goto exit;
+			}
+		}
+		retval = reflash_update_app_config(true);
+		if (retval < 0) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to reflash application config\n");
+			goto exit;
+		}
+		break;
+	case NONE:
+	default:
+		break;
+	}
+
+	LOGD(tcm_hcd->pdev->dev.parent,
+			"End of reflash\n");
+
+	retval = 0;
+
+exit:
+	if (reflash_hcd->fw_entry) {
+		release_firmware(reflash_hcd->fw_entry);
+		reflash_hcd->fw_entry = NULL;
+		reflash_hcd->image = NULL;
+		reflash_hcd->image_size = 0;
+	}
+
+	atomic_set(&tcm_hcd->firmware_flashing, 0);
+	wake_up_interruptible(&tcm_hcd->reflash_wq);
+	return retval;
+}
+
+#ifdef STARTUP_REFLASH
+/*
+ * Deferred startup reflash handler, queued from reflash_init().
+ * When a framebuffer/DRM stack is configured it first waits (polling)
+ * for the panel to become ready, then performs the reflash while
+ * holding a wakeup reference and the reflash mutex.
+ */
+static void reflash_startup_work(struct work_struct *work)
+{
+	int retval;
+#if defined(CONFIG_DRM) || defined(CONFIG_FB)
+	unsigned int timeout;
+#endif
+	struct syna_tcm_hcd *tcm_hcd = reflash_hcd->tcm_hcd;
+
+#if defined(CONFIG_DRM) || defined(CONFIG_FB)
+	/* poll for panel readiness, giving up after FB_READY_TIMEOUT_S */
+	timeout = FB_READY_TIMEOUT_S * 1000 / FB_READY_WAIT_MS;
+
+	while (tcm_hcd->fb_ready != FB_READY_COUNT - 1) {
+		if (timeout == 0) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+					"Timed out waiting for FB ready\n");
+			return;
+		}
+		msleep(FB_READY_WAIT_MS);
+		timeout--;
+	}
+#endif
+
+	/* keep the system awake for the duration of the flash operation */
+	pm_stay_awake(&tcm_hcd->pdev->dev);
+
+	mutex_lock(&reflash_hcd->reflash_mutex);
+
+	retval = reflash_do_reflash();
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to do reflash\n");
+	}
+
+	mutex_unlock(&reflash_hcd->reflash_mutex);
+
+	pm_relax(&tcm_hcd->pdev->dev);
+}
+#endif
+
+/*
+ * Allocate and initialize the reflash module state, optionally queue
+ * the startup reflash work, and create the sysfs interface.
+ *
+ * Returns 0 on success or a negative errno; on failure all partially
+ * created resources are rolled back and reflash_hcd is reset to NULL.
+ */
+static int reflash_init(struct syna_tcm_hcd *tcm_hcd)
+{
+	int retval;
+	int idx;
+
+	reflash_hcd = kzalloc(sizeof(*reflash_hcd), GFP_KERNEL);
+	if (!reflash_hcd) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to allocate memory for reflash_hcd\n");
+		return -ENOMEM;
+	}
+
+	reflash_hcd->image_buf = kzalloc(IMAGE_BUF_SIZE, GFP_KERNEL);
+	if (!reflash_hcd->image_buf) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+			"Failed to allocate memory for image_buf\n");
+		/* fix: retval was previously returned uninitialized here */
+		retval = -ENOMEM;
+		goto err_allocate_memory;
+	}
+
+	reflash_hcd->tcm_hcd = tcm_hcd;
+
+	reflash_hcd->force_update = FORCE_REFLASH;
+
+	mutex_init(&reflash_hcd->reflash_mutex);
+
+	INIT_BUFFER(reflash_hcd->out, false);
+	INIT_BUFFER(reflash_hcd->resp, false);
+	INIT_BUFFER(reflash_hcd->read, false);
+
+#ifdef STARTUP_REFLASH
+	/* kick off the deferred firmware update check */
+	reflash_hcd->workqueue =
+			create_singlethread_workqueue("syna_tcm_reflash");
+	INIT_WORK(&reflash_hcd->work, reflash_startup_work);
+	queue_work(reflash_hcd->workqueue, &reflash_hcd->work);
+#endif
+
+	if (!ENABLE_SYSFS_INTERFACE)
+		return 0;
+
+	reflash_hcd->sysfs_dir = kobject_create_and_add(SYSFS_DIR_NAME,
+			tcm_hcd->sysfs_dir);
+	if (!reflash_hcd->sysfs_dir) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to create sysfs directory\n");
+		retval = -EINVAL;
+		goto err_sysfs_create_dir;
+	}
+
+	for (idx = 0; idx < ARRAY_SIZE(attrs); idx++) {
+		retval = sysfs_create_file(reflash_hcd->sysfs_dir,
+				&(*attrs[idx]).attr);
+		if (retval < 0) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+					"Failed to create sysfs file\n");
+			goto err_sysfs_create_file;
+		}
+	}
+
+	retval = sysfs_create_bin_file(reflash_hcd->sysfs_dir, &bin_attrs[0]);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to create sysfs bin file\n");
+		goto err_sysfs_create_bin_file;
+	}
+
+	reflash_hcd->custom_dir = kobject_create_and_add(CUSTOM_DIR_NAME,
+			reflash_hcd->sysfs_dir);
+	if (!reflash_hcd->custom_dir) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to create custom sysfs directory\n");
+		retval = -EINVAL;
+		goto err_custom_sysfs_create_dir;
+	}
+
+	/* bin_attrs[0] lives in sysfs_dir; the rest go under custom_dir */
+	for (idx = 1; idx < ARRAY_SIZE(bin_attrs); idx++) {
+		retval = sysfs_create_bin_file(reflash_hcd->custom_dir,
+				&bin_attrs[idx]);
+		if (retval < 0) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+					"Failed to create sysfs bin file\n");
+			goto err_custom_sysfs_create_bin_file;
+		}
+	}
+
+	tcm_hcd->read_flash_data = reflash_read_data;
+
+	return 0;
+
+err_custom_sysfs_create_bin_file:
+	/* remove only the custom bin files created so far (indices 1..idx-1) */
+	for (idx--; idx > 0; idx--)
+		sysfs_remove_bin_file(reflash_hcd->custom_dir, &bin_attrs[idx]);
+
+	kobject_put(reflash_hcd->custom_dir);
+
+	/* fall through below with idx positioned to remove all attrs */
+	idx = ARRAY_SIZE(attrs);
+
+err_custom_sysfs_create_dir:
+	sysfs_remove_bin_file(reflash_hcd->sysfs_dir, &bin_attrs[0]);
+
+err_sysfs_create_bin_file:
+err_sysfs_create_file:
+	for (idx--; idx >= 0; idx--)
+		sysfs_remove_file(reflash_hcd->sysfs_dir, &(*attrs[idx]).attr);
+
+	kobject_put(reflash_hcd->sysfs_dir);
+
+err_sysfs_create_dir:
+err_allocate_memory:
+	kfree(reflash_hcd->image_buf);
+
+	RELEASE_BUFFER(reflash_hcd->read);
+	RELEASE_BUFFER(reflash_hcd->resp);
+	RELEASE_BUFFER(reflash_hcd->out);
+
+	kfree(reflash_hcd);
+	reflash_hcd = NULL;
+
+	return retval;
+}
+
+/*
+ * Tear down the reflash module: remove the sysfs interface, stop the
+ * startup workqueue, free all buffers, and signal completion so that
+ * module unload (reflash_module_exit) can proceed.
+ */
+static int reflash_remove(struct syna_tcm_hcd *tcm_hcd)
+{
+	int idx;
+
+	if (!reflash_hcd)
+		goto exit;
+
+	tcm_hcd->read_flash_data = NULL;
+
+	if (ENABLE_SYSFS_INTERFACE) {
+		/* mirror of reflash_init(): bin_attrs[1..] live in custom_dir */
+		for (idx = 1; idx < ARRAY_SIZE(bin_attrs); idx++) {
+			sysfs_remove_bin_file(reflash_hcd->custom_dir,
+					&bin_attrs[idx]);
+		}
+
+		kobject_put(reflash_hcd->custom_dir);
+
+		sysfs_remove_bin_file(reflash_hcd->sysfs_dir, &bin_attrs[0]);
+
+		for (idx = 0; idx < ARRAY_SIZE(attrs); idx++) {
+			sysfs_remove_file(reflash_hcd->sysfs_dir,
+					&(*attrs[idx]).attr);
+		}
+
+		kobject_put(reflash_hcd->sysfs_dir);
+	}
+
+#ifdef STARTUP_REFLASH
+	/* make sure no startup reflash is still running before freeing */
+	cancel_work_sync(&reflash_hcd->work);
+	flush_workqueue(reflash_hcd->workqueue);
+	destroy_workqueue(reflash_hcd->workqueue);
+#endif
+
+	kfree(reflash_hcd->image_buf);
+
+	RELEASE_BUFFER(reflash_hcd->read);
+	RELEASE_BUFFER(reflash_hcd->resp);
+	RELEASE_BUFFER(reflash_hcd->out);
+
+	kfree(reflash_hcd);
+	reflash_hcd = NULL;
+
+exit:
+	complete(&reflash_remove_complete);
+
+	return 0;
+}
+
+/*
+ * Reset callback from the TCM core: re-create the module state after a
+ * device reset if it has not been initialized yet; otherwise a no-op.
+ */
+static int reflash_reset(struct syna_tcm_hcd *tcm_hcd)
+{
+	if (reflash_hcd)
+		return 0;
+
+	return reflash_init(tcm_hcd);
+}
+
+/* Module registration descriptor handed to the TCM core driver. */
+static struct syna_tcm_module_cb reflash_module = {
+	.type = TCM_REFLASH,
+	.init = reflash_init,
+	.remove = reflash_remove,
+	.syncbox = NULL,
+	.asyncbox = NULL,
+	.reset = reflash_reset,
+	.suspend = NULL,
+	.resume = NULL,
+	.early_suspend = NULL,
+};
+
+/* Register the reflash module with the TCM core at module load. */
+static int __init reflash_module_init(void)
+{
+	return syna_tcm_add_module(&reflash_module, true);
+}
+
+/*
+ * Deregister from the TCM core, then block until reflash_remove() has
+ * signalled that teardown finished before letting the module unload.
+ */
+static void __exit reflash_module_exit(void)
+{
+	syna_tcm_add_module(&reflash_module, false);
+
+	wait_for_completion(&reflash_remove_complete);
+}
+
+module_init(reflash_module_init);
+module_exit(reflash_module_exit);
+
+MODULE_AUTHOR("Synaptics, Inc.");
+MODULE_DESCRIPTION("Synaptics TCM Reflash Module");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/touchscreen/synaptics_tcm/synaptics_tcm_spi.c b/drivers/input/touchscreen/synaptics_tcm/synaptics_tcm_spi.c
new file mode 100644
index 0000000..e54269c
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_tcm/synaptics_tcm_spi.c
@@ -0,0 +1,670 @@
+/*
+ * Synaptics TCM touchscreen driver
+ *
+ * Copyright (C) 2017-2019 Synaptics Incorporated. All rights reserved.
+ *
+ * Copyright (C) 2017-2019 Scott Lin <scott.lin@tw.synaptics.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * INFORMATION CONTAINED IN THIS DOCUMENT IS PROVIDED "AS-IS," AND SYNAPTICS
+ * EXPRESSLY DISCLAIMS ALL EXPRESS AND IMPLIED WARRANTIES, INCLUDING ANY
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE,
+ * AND ANY WARRANTIES OF NON-INFRINGEMENT OF ANY INTELLECTUAL PROPERTY RIGHTS.
+ * IN NO EVENT SHALL SYNAPTICS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, PUNITIVE, OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OF THE INFORMATION CONTAINED IN THIS DOCUMENT, HOWEVER CAUSED
+ * AND BASED ON ANY THEORY OF LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHER TORTIOUS ACTION, AND EVEN IF SYNAPTICS WAS ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE. IF A TRIBUNAL OF COMPETENT JURISDICTION DOES
+ * NOT PERMIT THE DISCLAIMER OF DIRECT DAMAGES OR ANY OTHER DAMAGES, SYNAPTICS'
+ * TOTAL CUMULATIVE LIABILITY TO ANY PARTY SHALL NOT EXCEED ONE HUNDRED U.S.
+ * DOLLARS.
+ */
+
+#include <linux/spi/spi.h>
+#include <linux/of_gpio.h>
+#include "synaptics_tcm_core.h"
+
+static unsigned char *buf;
+
+static unsigned int buf_size;
+
+static struct spi_transfer *xfer;
+
+static struct syna_tcm_bus_io bus_io;
+
+static struct syna_tcm_hw_interface hw_if;
+
+static struct platform_device *syna_tcm_spi_device;
+
+#ifdef CONFIG_OF
+/*
+ * Populate the board data structure from device tree properties.
+ *
+ * @dev: SPI device whose of_node is parsed
+ * @bdata: board data to fill in
+ *
+ * Optional properties default to -1 (GPIOs), NULL (regulator names) or
+ * 0 (numeric values). Returns 0 on success or a negative errno if a
+ * present property fails to parse.
+ */
+static int parse_dt(struct device *dev, struct syna_tcm_board_data *bdata)
+{
+	int retval;
+	u32 value;
+	struct property *prop;
+	struct device_node *np = dev->of_node;
+	const char *name;
+
+	prop = of_find_property(np, "synaptics,irq-gpio", NULL);
+	if (prop && prop->length) {
+		/* cast assumes irq_flags has the layout of enum of_gpio_flags
+		 * -- TODO confirm against the board data declaration
+		 */
+		bdata->irq_gpio = of_get_named_gpio_flags(np,
+				"synaptics,irq-gpio", 0,
+				(enum of_gpio_flags *)&bdata->irq_flags);
+	} else {
+		bdata->irq_gpio = -1;
+	}
+
+	retval = of_property_read_u32(np, "synaptics,irq-on-state", &value);
+	if (retval < 0)
+		bdata->irq_on_state = 0;
+	else
+		bdata->irq_on_state = value;
+
+	retval = of_property_read_string(np, "synaptics,pwr-reg-name", &name);
+	if (retval < 0)
+		bdata->pwr_reg_name = NULL;
+	else
+		bdata->pwr_reg_name = name;
+
+	retval = of_property_read_string(np, "synaptics,bus-reg-name", &name);
+	if (retval < 0)
+		bdata->bus_reg_name = NULL;
+	else
+		bdata->bus_reg_name = name;
+
+	prop = of_find_property(np, "synaptics,power-gpio", NULL);
+	if (prop && prop->length) {
+		bdata->power_gpio = of_get_named_gpio_flags(np,
+				"synaptics,power-gpio", 0, NULL);
+	} else {
+		bdata->power_gpio = -1;
+	}
+
+	prop = of_find_property(np, "synaptics,power-on-state", NULL);
+	if (prop && prop->length) {
+		retval = of_property_read_u32(np, "synaptics,power-on-state",
+				&value);
+		if (retval < 0) {
+			LOGE(dev,
+				"Failed to read synaptics,power-on-state\n");
+			return retval;
+		}
+		bdata->power_on_state = value;
+	} else {
+		bdata->power_on_state = 0;
+	}
+
+	prop = of_find_property(np, "synaptics,power-delay-ms", NULL);
+	if (prop && prop->length) {
+		retval = of_property_read_u32(np, "synaptics,power-delay-ms",
+				&value);
+		if (retval < 0) {
+			LOGE(dev, "Failed to read synaptics,power-delay-ms\n");
+			return retval;
+		}
+		bdata->power_delay_ms = value;
+	} else {
+		bdata->power_delay_ms = 0;
+	}
+
+	prop = of_find_property(np, "synaptics,reset-gpio", NULL);
+	if (prop && prop->length) {
+		bdata->reset_gpio = of_get_named_gpio_flags(np,
+				"synaptics,reset-gpio", 0, NULL);
+	} else {
+		bdata->reset_gpio = -1;
+	}
+
+	prop = of_find_property(np, "synaptics,reset-on-state", NULL);
+	if (prop && prop->length) {
+		retval = of_property_read_u32(np, "synaptics,reset-on-state",
+				&value);
+		if (retval < 0) {
+			LOGE(dev, "Failed to read synaptics,reset-on-state\n");
+			return retval;
+		}
+		bdata->reset_on_state = value;
+	} else {
+		bdata->reset_on_state = 0;
+	}
+
+	prop = of_find_property(np, "synaptics,reset-active-ms", NULL);
+	if (prop && prop->length) {
+		retval = of_property_read_u32(np, "synaptics,reset-active-ms",
+				&value);
+		if (retval < 0) {
+			LOGE(dev, "Failed to read synaptics,reset-active-ms\n");
+			return retval;
+		}
+		bdata->reset_active_ms = value;
+	} else {
+		bdata->reset_active_ms = 0;
+	}
+
+	prop = of_find_property(np, "synaptics,reset-delay-ms", NULL);
+	if (prop && prop->length) {
+		retval = of_property_read_u32(np, "synaptics,reset-delay-ms",
+				&value);
+		if (retval < 0) {
+			LOGE(dev, "Unable to read synaptics,reset-delay-ms\n");
+			return retval;
+		}
+		bdata->reset_delay_ms = value;
+	} else {
+		bdata->reset_delay_ms = 0;
+	}
+
+	/*
+	 * Boolean flags: presence of the property means true. Fixed from
+	 * "prop > 0", which is an invalid ordered comparison between a
+	 * pointer and an integer; test against NULL instead.
+	 */
+	prop = of_find_property(np, "synaptics,x-flip", NULL);
+	bdata->x_flip = prop ? true : false;
+
+	prop = of_find_property(np, "synaptics,y-flip", NULL);
+	bdata->y_flip = prop ? true : false;
+
+	prop = of_find_property(np, "synaptics,swap-axes", NULL);
+	bdata->swap_axes = prop ? true : false;
+
+	prop = of_find_property(np, "synaptics,byte-delay-us", NULL);
+	if (prop && prop->length) {
+		retval = of_property_read_u32(np, "synaptics,byte-delay-us",
+				&value);
+		if (retval < 0) {
+			LOGE(dev, "Unable to read synaptics,byte-delay-us\n");
+			return retval;
+		}
+		bdata->byte_delay_us = value;
+	} else {
+		bdata->byte_delay_us = 0;
+	}
+
+	prop = of_find_property(np, "synaptics,block-delay-us", NULL);
+	if (prop && prop->length) {
+		retval = of_property_read_u32(np, "synaptics,block-delay-us",
+				&value);
+		if (retval < 0) {
+			LOGE(dev, "Unable to read synaptics,block-delay-us\n");
+			return retval;
+		}
+		bdata->block_delay_us = value;
+	} else {
+		bdata->block_delay_us = 0;
+	}
+
+	prop = of_find_property(np, "synaptics,spi-mode", NULL);
+	if (prop && prop->length) {
+		retval = of_property_read_u32(np, "synaptics,spi-mode",
+				&value);
+		if (retval < 0) {
+			LOGE(dev, "Unable to read synaptics,spi-mode\n");
+			return retval;
+		}
+		bdata->spi_mode = value;
+
+	} else {
+		bdata->spi_mode = 0;
+	}
+
+	prop = of_find_property(np, "synaptics,ubl-max-freq", NULL);
+	if (prop && prop->length) {
+		retval = of_property_read_u32(np, "synaptics,ubl-max-freq",
+				&value);
+		if (retval < 0) {
+			LOGE(dev, "Unable to read synaptics,ubl-max-freq\n");
+			return retval;
+		}
+		bdata->ubl_max_freq = value;
+	} else {
+		bdata->ubl_max_freq = 0;
+	}
+
+	prop = of_find_property(np, "synaptics,ubl-byte-delay-us", NULL);
+	if (prop && prop->length) {
+		retval = of_property_read_u32(np, "synaptics,ubl-byte-delay-us",
+				&value);
+		if (retval < 0) {
+			LOGE(dev,
+				"Unable to read synaptics,ubl-byte-delay-us\n");
+			return retval;
+		}
+		bdata->ubl_byte_delay_us = value;
+	} else {
+		bdata->ubl_byte_delay_us = 0;
+	}
+
+	return 0;
+}
+#endif
+
+/*
+ * Ensure the shared transfer array can hold 'count' spi_transfer
+ * entries and the shared staging buffer can hold 'size' bytes.
+ * Buffers only grow; already-allocated memory is reused (the transfer
+ * array is zeroed for reuse). Returns 0 or -ENOMEM.
+ */
+static int syna_tcm_spi_alloc_mem(struct syna_tcm_hcd *tcm_hcd,
+		unsigned int count, unsigned int size)
+{
+	static unsigned int xfer_count;
+	struct spi_device *spi = to_spi_device(tcm_hcd->pdev->dev.parent);
+
+	if (count > xfer_count) {
+		kfree(xfer);
+		xfer = kcalloc(count, sizeof(*xfer), GFP_KERNEL);
+		if (!xfer) {
+			LOGE(&spi->dev,
+					"Failed to allocate memory for xfer\n");
+			xfer_count = 0;
+			return -ENOMEM;
+		}
+		xfer_count = count;
+	} else if (count) {
+		/* guard added: avoid memset(NULL, ...) before first allocation */
+		memset(xfer, 0, count * sizeof(*xfer));
+	}
+
+	if (size > buf_size) {
+		/* kfree(NULL) is a no-op, so no 'if (buf_size)' guard needed */
+		kfree(buf);
+		buf = kmalloc(size, GFP_KERNEL);
+		if (!buf) {
+			LOGE(&spi->dev,
+					"Failed to allocate memory for buf\n");
+			buf_size = 0;
+			return -ENOMEM;
+		}
+		buf_size = size;
+	}
+
+	return 0;
+}
+
+/*
+ * Read 'length' bytes from the bootloader/RMI address space.
+ *
+ * Sends a 2-byte address (MSB set to flag a read) followed by dummy
+ * 0xff bytes while clocking in the response. When a per-byte delay is
+ * required by the board data, the transaction is split into 1-byte
+ * transfers; otherwise two bulk transfers are used. The transfer is
+ * always performed in SPI mode 3, restoring the previous mode after.
+ *
+ * Returns the number of bytes read, or a negative errno.
+ */
+static int syna_tcm_spi_rmi_read(struct syna_tcm_hcd *tcm_hcd,
+		unsigned short addr, unsigned char *data, unsigned int length)
+{
+	int retval;
+	unsigned int idx;
+	unsigned int mode;
+	unsigned int byte_count;
+	struct spi_message msg;
+	struct spi_device *spi = to_spi_device(tcm_hcd->pdev->dev.parent);
+	const struct syna_tcm_board_data *bdata = tcm_hcd->hw_if->bdata;
+
+	mutex_lock(&tcm_hcd->io_ctrl_mutex);
+
+	spi_message_init(&msg);
+
+	/* 2 address bytes precede the payload on the wire */
+	byte_count = length + 2;
+
+	if (bdata->ubl_byte_delay_us == 0)
+		retval = syna_tcm_spi_alloc_mem(tcm_hcd, 2, byte_count);
+	else
+		retval = syna_tcm_spi_alloc_mem(tcm_hcd, byte_count, 3);
+	if (retval < 0) {
+		LOGE(&spi->dev,
+				"Failed to allocate memory\n");
+		goto exit;
+	}
+
+	/* big-endian address; top bit marks this as a read */
+	buf[0] = (unsigned char)(addr >> 8) | 0x80;
+	buf[1] = (unsigned char)addr;
+
+	if (bdata->ubl_byte_delay_us == 0) {
+		/* bulk path: one transfer for the address, one for the data */
+		xfer[0].len = 2;
+		xfer[0].tx_buf = buf;
+		xfer[0].speed_hz = bdata->ubl_max_freq;
+		spi_message_add_tail(&xfer[0], &msg);
+		memset(&buf[2], 0xff, length);
+		xfer[1].len = length;
+		xfer[1].tx_buf = &buf[2];
+		xfer[1].rx_buf = data;
+		if (bdata->block_delay_us)
+			xfer[1].delay_usecs = bdata->block_delay_us;
+		xfer[1].speed_hz = bdata->ubl_max_freq;
+		spi_message_add_tail(&xfer[1], &msg);
+	} else {
+		/* byte-at-a-time path with an inter-byte delay */
+		buf[2] = 0xff;
+		for (idx = 0; idx < byte_count; idx++) {
+			xfer[idx].len = 1;
+			if (idx < 2) {
+				xfer[idx].tx_buf = &buf[idx];
+			} else {
+				xfer[idx].tx_buf = &buf[2];
+				xfer[idx].rx_buf = &data[idx - 2];
+			}
+			xfer[idx].delay_usecs = bdata->ubl_byte_delay_us;
+			/* last byte uses the (longer) block delay instead */
+			if (bdata->block_delay_us && (idx == byte_count - 1))
+				xfer[idx].delay_usecs = bdata->block_delay_us;
+			xfer[idx].speed_hz = bdata->ubl_max_freq;
+			spi_message_add_tail(&xfer[idx], &msg);
+		}
+	}
+
+	/* bootloader access requires SPI mode 3 regardless of normal mode */
+	mode = spi->mode;
+	spi->mode = SPI_MODE_3;
+
+	retval = spi_sync(spi, &msg);
+	if (retval == 0) {
+		retval = length;
+	} else {
+		LOGE(&spi->dev,
+				"Failed to complete SPI transfer, error = %d\n",
+				retval);
+	}
+
+	spi->mode = mode;
+
+exit:
+	mutex_unlock(&tcm_hcd->io_ctrl_mutex);
+
+	return retval;
+}
+
+/*
+ * Write 'length' bytes to the bootloader/RMI address space.
+ *
+ * Stages a 2-byte address (MSB cleared to flag a write) plus the
+ * payload into the shared buffer and sends it as a single transfer,
+ * always in SPI mode 3 (previous mode restored afterwards).
+ *
+ * Returns the number of bytes written, or a negative errno.
+ */
+static int syna_tcm_spi_rmi_write(struct syna_tcm_hcd *tcm_hcd,
+		unsigned short addr, unsigned char *data, unsigned int length)
+{
+	int retval;
+	unsigned int mode;
+	unsigned int byte_count;
+	struct spi_message msg;
+	struct spi_device *spi = to_spi_device(tcm_hcd->pdev->dev.parent);
+	const struct syna_tcm_board_data *bdata = tcm_hcd->hw_if->bdata;
+
+	mutex_lock(&tcm_hcd->io_ctrl_mutex);
+
+	spi_message_init(&msg);
+
+	/* 2 address bytes precede the payload on the wire */
+	byte_count = length + 2;
+
+	retval = syna_tcm_spi_alloc_mem(tcm_hcd, 1, byte_count);
+	if (retval < 0) {
+		LOGE(&spi->dev,
+				"Failed to allocate memory\n");
+		goto exit;
+	}
+
+	/* big-endian address; top bit cleared marks this as a write */
+	buf[0] = (unsigned char)(addr >> 8) & ~0x80;
+	buf[1] = (unsigned char)addr;
+	retval = secure_memcpy(&buf[2],
+			buf_size - 2,
+			data,
+			length,
+			length);
+	if (retval < 0) {
+		LOGE(&spi->dev,
+				"Failed to copy write data\n");
+		goto exit;
+	}
+
+	xfer[0].len = byte_count;
+	xfer[0].tx_buf = buf;
+	if (bdata->block_delay_us)
+		xfer[0].delay_usecs = bdata->block_delay_us;
+	spi_message_add_tail(&xfer[0], &msg);
+
+	/* bootloader access requires SPI mode 3 regardless of normal mode */
+	mode = spi->mode;
+	spi->mode = SPI_MODE_3;
+
+	retval = spi_sync(spi, &msg);
+	if (retval == 0) {
+		retval = length;
+	} else {
+		LOGE(&spi->dev,
+				"Failed to complete SPI transfer, error = %d\n",
+				retval);
+	}
+
+	spi->mode = mode;
+
+exit:
+	mutex_unlock(&tcm_hcd->io_ctrl_mutex);
+
+	return retval;
+}
+
+/*
+ * TCM-protocol read: clock in 'length' bytes while driving 0xff on
+ * MOSI. Uses one bulk transfer, or 1-byte transfers with an inter-byte
+ * delay when the board data requires it.
+ *
+ * Returns the number of bytes read, or a negative errno.
+ */
+static int syna_tcm_spi_read(struct syna_tcm_hcd *tcm_hcd, unsigned char *data,
+		unsigned int length)
+{
+	int retval;
+	unsigned int idx;
+	struct spi_message msg;
+	struct spi_device *spi = to_spi_device(tcm_hcd->pdev->dev.parent);
+	const struct syna_tcm_board_data *bdata = tcm_hcd->hw_if->bdata;
+
+	mutex_lock(&tcm_hcd->io_ctrl_mutex);
+
+	spi_message_init(&msg);
+
+	if (bdata->byte_delay_us == 0)
+		retval = syna_tcm_spi_alloc_mem(tcm_hcd, 1, length);
+	else
+		retval = syna_tcm_spi_alloc_mem(tcm_hcd, length, 1);
+	if (retval < 0) {
+		/* use &spi->dev for consistency with the other bus functions */
+		LOGE(&spi->dev,
+				"Failed to allocate memory\n");
+		goto exit;
+	}
+
+	if (bdata->byte_delay_us == 0) {
+		/* bulk path: one transfer, MOSI held at 0xff */
+		memset(buf, 0xff, length);
+		xfer[0].len = length;
+		xfer[0].tx_buf = buf;
+		xfer[0].rx_buf = data;
+		if (bdata->block_delay_us)
+			xfer[0].delay_usecs = bdata->block_delay_us;
+		spi_message_add_tail(&xfer[0], &msg);
+	} else {
+		/* byte-at-a-time path with an inter-byte delay */
+		buf[0] = 0xff;
+		for (idx = 0; idx < length; idx++) {
+			xfer[idx].len = 1;
+			xfer[idx].tx_buf = buf;
+			xfer[idx].rx_buf = &data[idx];
+			xfer[idx].delay_usecs = bdata->byte_delay_us;
+			/* last byte uses the (longer) block delay instead */
+			if (bdata->block_delay_us && (idx == length - 1))
+				xfer[idx].delay_usecs = bdata->block_delay_us;
+			spi_message_add_tail(&xfer[idx], &msg);
+		}
+	}
+
+	retval = spi_sync(spi, &msg);
+	if (retval == 0) {
+		retval = length;
+	} else {
+		LOGE(&spi->dev,
+				"Failed to complete SPI transfer, error = %d\n",
+				retval);
+	}
+
+exit:
+	mutex_unlock(&tcm_hcd->io_ctrl_mutex);
+
+	return retval;
+}
+
+/*
+ * TCM-protocol write: send 'length' bytes straight from the caller's
+ * buffer. Uses one bulk transfer, or 1-byte transfers with an
+ * inter-byte delay when the board data requires it. Only the transfer
+ * array is needed here (size 0), since data is transmitted in place.
+ *
+ * Returns the number of bytes written, or a negative errno.
+ */
+static int syna_tcm_spi_write(struct syna_tcm_hcd *tcm_hcd, unsigned char *data,
+		unsigned int length)
+{
+	int retval;
+	unsigned int idx;
+	struct spi_message msg;
+	struct spi_device *spi = to_spi_device(tcm_hcd->pdev->dev.parent);
+	const struct syna_tcm_board_data *bdata = tcm_hcd->hw_if->bdata;
+
+	mutex_lock(&tcm_hcd->io_ctrl_mutex);
+
+	spi_message_init(&msg);
+
+	if (bdata->byte_delay_us == 0)
+		retval = syna_tcm_spi_alloc_mem(tcm_hcd, 1, 0);
+	else
+		retval = syna_tcm_spi_alloc_mem(tcm_hcd, length, 0);
+	if (retval < 0) {
+		LOGE(&spi->dev,
+				"Failed to allocate memory\n");
+		goto exit;
+	}
+
+	if (bdata->byte_delay_us == 0) {
+		/* bulk path: single transfer from the caller's buffer */
+		xfer[0].len = length;
+		xfer[0].tx_buf = data;
+		if (bdata->block_delay_us)
+			xfer[0].delay_usecs = bdata->block_delay_us;
+		spi_message_add_tail(&xfer[0], &msg);
+	} else {
+		/* byte-at-a-time path with an inter-byte delay */
+		for (idx = 0; idx < length; idx++) {
+			xfer[idx].len = 1;
+			xfer[idx].tx_buf = &data[idx];
+			xfer[idx].delay_usecs = bdata->byte_delay_us;
+			/* last byte uses the (longer) block delay instead */
+			if (bdata->block_delay_us && (idx == length - 1))
+				xfer[idx].delay_usecs = bdata->block_delay_us;
+			spi_message_add_tail(&xfer[idx], &msg);
+		}
+	}
+
+	retval = spi_sync(spi, &msg);
+	if (retval == 0) {
+		retval = length;
+	} else {
+		LOGE(&spi->dev,
+				"Failed to complete SPI transfer, error = %d\n",
+				retval);
+	}
+
+exit:
+	mutex_unlock(&tcm_hcd->io_ctrl_mutex);
+
+	return retval;
+}
+
+/*
+ * SPI probe: parse board data, configure the SPI device, and register
+ * a child platform device that carries the hardware interface to the
+ * TCM core driver.
+ *
+ * Fixes a leak present in the original: the device obtained from
+ * platform_device_alloc() was never released on error paths; release
+ * it with platform_device_put(). The parse_dt() return value is now
+ * also checked instead of being silently ignored.
+ */
+static int syna_tcm_spi_probe(struct spi_device *spi)
+{
+	int retval;
+
+	if (spi->master->flags & SPI_MASTER_HALF_DUPLEX) {
+		LOGE(&spi->dev,
+				"Full duplex not supported by host\n");
+		return -EIO;
+	}
+
+	syna_tcm_spi_device = platform_device_alloc(PLATFORM_DRIVER_NAME, 0);
+	if (!syna_tcm_spi_device) {
+		LOGE(&spi->dev,
+				"Failed to allocate platform device\n");
+		return -ENOMEM;
+	}
+
+#ifdef CONFIG_OF
+	hw_if.bdata = devm_kzalloc(&spi->dev, sizeof(*hw_if.bdata), GFP_KERNEL);
+	if (!hw_if.bdata) {
+		LOGE(&spi->dev,
+				"Failed to allocate memory for board data\n");
+		retval = -ENOMEM;
+		goto err_put_device;
+	}
+	retval = parse_dt(&spi->dev, hw_if.bdata);
+	if (retval < 0) {
+		LOGE(&spi->dev,
+				"Failed to parse device tree\n");
+		goto err_put_device;
+	}
+#else
+	hw_if.bdata = spi->dev.platform_data;
+#endif
+
+	switch (hw_if.bdata->spi_mode) {
+	case 0:
+		spi->mode = SPI_MODE_0;
+		break;
+	case 1:
+		spi->mode = SPI_MODE_1;
+		break;
+	case 2:
+		spi->mode = SPI_MODE_2;
+		break;
+	case 3:
+		spi->mode = SPI_MODE_3;
+		break;
+	}
+
+	bus_io.type = BUS_SPI;
+	bus_io.read = syna_tcm_spi_read;
+	bus_io.write = syna_tcm_spi_write;
+	bus_io.rmi_read = syna_tcm_spi_rmi_read;
+	bus_io.rmi_write = syna_tcm_spi_rmi_write;
+
+	hw_if.bus_io = &bus_io;
+
+	spi->bits_per_word = 8;
+
+	retval = spi_setup(spi);
+	if (retval < 0) {
+		LOGE(&spi->dev,
+				"Failed to set up SPI protocol driver\n");
+		goto err_put_device;
+	}
+
+	syna_tcm_spi_device->dev.parent = &spi->dev;
+	syna_tcm_spi_device->dev.platform_data = &hw_if;
+
+	retval = platform_device_add(syna_tcm_spi_device);
+	if (retval < 0) {
+		LOGE(&spi->dev,
+				"Failed to add platform device\n");
+		goto err_put_device;
+	}
+
+	return 0;
+
+err_put_device:
+	/* release the refcount taken by platform_device_alloc() */
+	platform_device_put(syna_tcm_spi_device);
+	syna_tcm_spi_device = NULL;
+	return retval;
+}
+
+/*
+ * SPI remove: drop the platform data reference and unregister the
+ * child platform device created in probe (unregister also drops the
+ * final reference to the device).
+ */
+static int syna_tcm_spi_remove(struct spi_device *spi)
+{
+	syna_tcm_spi_device->dev.platform_data = NULL;
+
+	platform_device_unregister(syna_tcm_spi_device);
+
+	return 0;
+}
+
+/* SPI device ID table for legacy (non-device-tree) matching. */
+static const struct spi_device_id syna_tcm_id_table[] = {
+	{SPI_MODULE_NAME, 0},
+	{},
+};
+MODULE_DEVICE_TABLE(spi, syna_tcm_id_table);
+
+#ifdef CONFIG_OF
+/* Device tree match table; NULL when the kernel has no OF support. */
+static const struct of_device_id syna_tcm_of_match_table[] = {
+	{
+		.compatible = "synaptics,tcm-spi",
+	},
+	{},
+};
+MODULE_DEVICE_TABLE(of, syna_tcm_of_match_table);
+#else
+#define syna_tcm_of_match_table NULL
+#endif
+
+/* SPI driver registration descriptor. */
+static struct spi_driver syna_tcm_spi_driver = {
+	.driver = {
+		.name = SPI_MODULE_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = syna_tcm_of_match_table,
+	},
+	.probe = syna_tcm_spi_probe,
+	.remove = syna_tcm_spi_remove,
+	.id_table = syna_tcm_id_table,
+};
+
+/* Register the SPI bus driver; called by the TCM core module. */
+int syna_tcm_bus_init(void)
+{
+	return spi_register_driver(&syna_tcm_spi_driver);
+}
+EXPORT_SYMBOL(syna_tcm_bus_init);
+
+/*
+ * Unregister the SPI bus driver and free the shared transfer buffers.
+ *
+ * Reordered from the original: the driver is unregistered FIRST so no
+ * in-flight transfer can still be using 'buf'/'xfer' when they are
+ * freed (the original freed them before unregistering, leaving a
+ * use-after-free window). Pointers are reset so a later re-init starts
+ * from a clean state.
+ */
+void syna_tcm_bus_exit(void)
+{
+	spi_unregister_driver(&syna_tcm_spi_driver);
+
+	kfree(buf);
+	buf = NULL;
+	buf_size = 0;
+
+	kfree(xfer);
+	xfer = NULL;
+}
+EXPORT_SYMBOL(syna_tcm_bus_exit);
+
+MODULE_AUTHOR("Synaptics, Inc.");
+MODULE_DESCRIPTION("Synaptics TCM SPI Bus Module");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/touchscreen/synaptics_tcm/synaptics_tcm_testing.c b/drivers/input/touchscreen/synaptics_tcm/synaptics_tcm_testing.c
new file mode 100644
index 0000000..b1921f7
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_tcm/synaptics_tcm_testing.c
@@ -0,0 +1,1938 @@
+/*
+ * Synaptics TCM touchscreen driver
+ *
+ * Copyright (C) 2017-2019 Synaptics Incorporated. All rights reserved.
+ *
+ * Copyright (C) 2017-2019 Scott Lin <scott.lin@tw.synaptics.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * INFORMATION CONTAINED IN THIS DOCUMENT IS PROVIDED "AS-IS," AND SYNAPTICS
+ * EXPRESSLY DISCLAIMS ALL EXPRESS AND IMPLIED WARRANTIES, INCLUDING ANY
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE,
+ * AND ANY WARRANTIES OF NON-INFRINGEMENT OF ANY INTELLECTUAL PROPERTY RIGHTS.
+ * IN NO EVENT SHALL SYNAPTICS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, PUNITIVE, OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OF THE INFORMATION CONTAINED IN THIS DOCUMENT, HOWEVER CAUSED
+ * AND BASED ON ANY THEORY OF LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHER TORTIOUS ACTION, AND EVEN IF SYNAPTICS WAS ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE. IF A TRIBUNAL OF COMPETENT JURISDICTION DOES
+ * NOT PERMIT THE DISCLAIMER OF DIRECT DAMAGES OR ANY OTHER DAMAGES, SYNAPTICS'
+ * TOTAL CUMULATIVE LIABILITY TO ANY PARTY SHALL NOT EXCEED ONE HUNDRED U.S.
+ * DOLLARS.
+ */
+
+#include <linux/gpio.h>
+#include "synaptics_tcm_core.h"
+#include "synaptics_tcm_testing.h"
+
+#define SYSFS_DIR_NAME "testing"
+
+#define REPORT_TIMEOUT_MS 500
+
+/* Generate a sysfs "show" handler for a test named t_name.  The handler
+ * serializes against other external interfaces via extif_mutex, runs
+ * testing_<t_name>(), and prints "Passed"/"Failed" based on the shared
+ * testing_hcd->result flag.  On test failure the negative errno from
+ * the test routine is returned to the reader.
+ */
+#define testing_sysfs_show(t_name) \
+static ssize_t testing_sysfs_##t_name##_show(struct device *dev, \
+		struct device_attribute *attr, char *buf) \
+{ \
+	int retval; \
+	struct syna_tcm_hcd *tcm_hcd = testing_hcd->tcm_hcd; \
+\
+	mutex_lock(&tcm_hcd->extif_mutex); \
+\
+	retval = testing_##t_name(); \
+	if (retval < 0) { \
+		LOGE(tcm_hcd->pdev->dev.parent, \
+				"Failed to do "#t_name" test\n"); \
+		goto exit; \
+	} \
+\
+	retval = snprintf(buf, PAGE_SIZE, \
+			"%s\n", \
+			testing_hcd->result ? "Passed" : "Failed"); \
+\
+exit: \
+	mutex_unlock(&tcm_hcd->extif_mutex); \
+\
+	return retval; \
+}
+
+/* Production test item IDs sent to the firmware via CMD_PRODUCTION_TEST.
+ * Values are fixed by the TCM firmware protocol -- do not renumber.
+ */
+enum test_code {
+	TEST_TRX_TRX_SHORTS = 0,
+	TEST_TRX_SENSOR_OPENS = 1,
+	TEST_TRX_GROUND_SHORTS = 2,
+	TEST_DYNAMIC_RANGE = 7,
+	TEST_OPEN_SHORT_DETECTOR = 8,
+	TEST_NOISE = 10,
+	TEST_PT11 = 11,
+	TEST_PT12 = 12,
+	TEST_PT13 = 13,
+	TEST_DYNAMIC_RANGE_DOZE = 14,
+	TEST_NOISE_DOZE = 15,
+};
+
+/* Per-module state for the testing host-control driver.
+ * result        - pass/fail verdict of the most recent test
+ * report_type   - report being collected (0 when idle)
+ * out/resp      - command payload and firmware response buffers
+ * report/process/output - staging buffers; "output" holds the formatted
+ *                 result exposed through the "data" bin attribute
+ * collect_reports - hook used by the report dispatcher to gather frames
+ */
+struct testing_hcd {
+	bool result;
+	unsigned char report_type;
+	unsigned int report_index;
+	unsigned int num_of_reports;
+	struct kobject *sysfs_dir;
+	struct syna_tcm_buffer out;
+	struct syna_tcm_buffer resp;
+	struct syna_tcm_buffer report;
+	struct syna_tcm_buffer process;
+	struct syna_tcm_buffer output;
+	struct syna_tcm_hcd *tcm_hcd;
+	int (*collect_reports)(enum report_type report_type,
+			unsigned int num_of_reports);
+};
+
+DECLARE_COMPLETION(report_complete);
+
+DECLARE_COMPLETION(testing_remove_complete);
+
+static struct testing_hcd *testing_hcd;
+
+static int testing_dynamic_range(void);
+
+static int testing_dynamic_range_lpwg(void);
+
+static int testing_dynamic_range_doze(void);
+
+static int testing_noise(void);
+
+static int testing_noise_lpwg(void);
+
+static int testing_noise_doze(void);
+
+static int testing_open_short_detector(void);
+
+static int testing_pt11(void);
+
+static int testing_pt12(void);
+
+static int testing_pt13(void);
+
+static int testing_reset_open(void);
+
+static int testing_lockdown(void);
+
+static int testing_trx(enum test_code test_code);
+
+SHOW_PROTOTYPE(testing, dynamic_range);
+SHOW_PROTOTYPE(testing, dynamic_range_lpwg);
+SHOW_PROTOTYPE(testing, dynamic_range_doze);
+SHOW_PROTOTYPE(testing, noise);
+SHOW_PROTOTYPE(testing, noise_lpwg);
+SHOW_PROTOTYPE(testing, noise_doze);
+SHOW_PROTOTYPE(testing, open_short_detector);
+SHOW_PROTOTYPE(testing, pt11);
+SHOW_PROTOTYPE(testing, pt12);
+SHOW_PROTOTYPE(testing, pt13);
+SHOW_PROTOTYPE(testing, reset_open);
+SHOW_PROTOTYPE(testing, lockdown);
+SHOW_PROTOTYPE(testing, trx_trx_shorts);
+SHOW_PROTOTYPE(testing, trx_sensor_opens);
+SHOW_PROTOTYPE(testing, trx_ground_shorts);
+SHOW_PROTOTYPE(testing, size);
+
+/* Read-only sysfs attributes created under the "testing" directory;
+ * one entry per SHOW_PROTOTYPE declared above.
+ */
+static struct device_attribute *attrs[] = {
+	ATTRIFY(dynamic_range),
+	ATTRIFY(dynamic_range_lpwg),
+	ATTRIFY(dynamic_range_doze),
+	ATTRIFY(noise),
+	ATTRIFY(noise_lpwg),
+	ATTRIFY(noise_doze),
+	ATTRIFY(open_short_detector),
+	ATTRIFY(pt11),
+	ATTRIFY(pt12),
+	ATTRIFY(pt13),
+	ATTRIFY(reset_open),
+	ATTRIFY(lockdown),
+	ATTRIFY(trx_trx_shorts),
+	ATTRIFY(trx_sensor_opens),
+	ATTRIFY(trx_ground_shorts),
+	ATTRIFY(size),
+};
+
+static ssize_t testing_sysfs_data_show(struct file *data_file,
+		struct kobject *kobj, struct bin_attribute *attributes,
+		char *buf, loff_t pos, size_t count);
+
+/* Binary sysfs file "data" (0444): raw output of the last test run.
+ * .size is 0 because the payload length varies per test; userspace
+ * reads the "size" attribute first.
+ */
+static struct bin_attribute bin_attr = {
+	.attr = {
+		.name = "data",
+		.mode = 0444,
+	},
+	.size = 0,
+	.read = testing_sysfs_data_show,
+};
+
+/* Instantiate the boilerplate show handlers for each simple test. */
+testing_sysfs_show(dynamic_range)
+
+testing_sysfs_show(dynamic_range_lpwg)
+
+testing_sysfs_show(dynamic_range_doze)
+
+testing_sysfs_show(noise)
+
+testing_sysfs_show(noise_lpwg)
+
+testing_sysfs_show(noise_doze)
+
+testing_sysfs_show(open_short_detector)
+
+testing_sysfs_show(pt11)
+
+testing_sysfs_show(pt12)
+
+testing_sysfs_show(pt13)
+
+testing_sysfs_show(reset_open)
+
+testing_sysfs_show(lockdown)
+
+/* sysfs show handler for the TRX-TRX shorts test; hand-written (rather
+ * than via testing_sysfs_show) because the three trx tests share one
+ * implementation, testing_trx(), parameterized by test code.
+ */
+static ssize_t testing_sysfs_trx_trx_shorts_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int retval;
+	struct syna_tcm_hcd *tcm_hcd = testing_hcd->tcm_hcd;
+
+	mutex_lock(&tcm_hcd->extif_mutex);
+
+	retval = testing_trx(TEST_TRX_TRX_SHORTS);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to do TRX-TRX shorts test\n");
+		goto exit;
+	}
+
+	retval = snprintf(buf, PAGE_SIZE,
+			"%s\n",
+			testing_hcd->result ? "Passed" : "Failed");
+
+exit:
+	mutex_unlock(&tcm_hcd->extif_mutex);
+
+	return retval;
+}
+
+/* sysfs show handler for the TRX-sensor opens test (see note on the
+ * trx_trx_shorts handler above for why these are hand-written).
+ */
+static ssize_t testing_sysfs_trx_sensor_opens_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int retval;
+	struct syna_tcm_hcd *tcm_hcd = testing_hcd->tcm_hcd;
+
+	mutex_lock(&tcm_hcd->extif_mutex);
+
+	retval = testing_trx(TEST_TRX_SENSOR_OPENS);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to do TRX-sensor opens test\n");
+		goto exit;
+	}
+
+	retval = snprintf(buf, PAGE_SIZE,
+			"%s\n",
+			testing_hcd->result ? "Passed" : "Failed");
+
+exit:
+	mutex_unlock(&tcm_hcd->extif_mutex);
+
+	return retval;
+}
+
+/* sysfs show handler for the TRX-ground shorts test. */
+static ssize_t testing_sysfs_trx_ground_shorts_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int retval;
+	struct syna_tcm_hcd *tcm_hcd = testing_hcd->tcm_hcd;
+
+	mutex_lock(&tcm_hcd->extif_mutex);
+
+	retval = testing_trx(TEST_TRX_GROUND_SHORTS);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to do TRX-ground shorts test\n");
+		goto exit;
+	}
+
+	retval = snprintf(buf, PAGE_SIZE,
+			"%s\n",
+			testing_hcd->result ? "Passed" : "Failed");
+
+exit:
+	mutex_unlock(&tcm_hcd->extif_mutex);
+
+	return retval;
+}
+
+/* sysfs "size" attribute: report the byte count of the data exposed by
+ * the "data" binary attribute so userspace knows how much to read.
+ */
+static ssize_t testing_sysfs_size_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int retval;
+	struct syna_tcm_hcd *tcm_hcd = testing_hcd->tcm_hcd;
+
+	mutex_lock(&tcm_hcd->extif_mutex);
+
+	LOCK_BUFFER(testing_hcd->output);
+
+	retval = snprintf(buf, PAGE_SIZE,
+			"%u\n",
+			testing_hcd->output.data_length);
+
+	UNLOCK_BUFFER(testing_hcd->output);
+
+	mutex_unlock(&tcm_hcd->extif_mutex);
+
+	return retval;
+}
+
+/* sysfs "data" binary attribute read handler: copy up to @count bytes of
+ * the most recent test output starting at offset @pos into @buf.
+ * Returns the number of bytes copied, 0 at/after end of data, or a
+ * negative errno on copy failure.
+ */
+static ssize_t testing_sysfs_data_show(struct file *data_file,
+		struct kobject *kobj, struct bin_attribute *attributes,
+		char *buf, loff_t pos, size_t count)
+{
+	int retval;
+	unsigned int readlen;
+	struct syna_tcm_hcd *tcm_hcd = testing_hcd->tcm_hcd;
+
+	mutex_lock(&tcm_hcd->extif_mutex);
+
+	LOCK_BUFFER(testing_hcd->output);
+
+	/* Reject reads at or past the end of the captured data.  Without
+	 * this check "data_length - pos" wraps around for pos >=
+	 * data_length, readlen becomes a huge unsigned value, and the
+	 * copy below can read out of bounds.
+	 */
+	if (pos >= testing_hcd->output.data_length) {
+		retval = 0;
+		goto exit;
+	}
+
+	readlen = MIN(count, testing_hcd->output.data_length - pos);
+
+	retval = secure_memcpy(buf,
+			count,
+			&testing_hcd->output.buf[pos],
+			testing_hcd->output.buf_size - pos,
+			readlen);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+			"Failed to copy report data\n");
+	} else {
+		retval = readlen;
+	}
+
+exit:
+	UNLOCK_BUFFER(testing_hcd->output);
+
+	mutex_unlock(&tcm_hcd->extif_mutex);
+
+	return retval;
+}
+
+/* Send one production test command to the firmware and leave its raw
+ * response in testing_hcd->resp.  If the device has dual firmware and
+ * is not yet in production-test mode, switch modes first; otherwise the
+ * application firmware must be up and healthy.  Returns 0 on success or
+ * a negative errno.
+ */
+static int testing_run_prod_test_item(enum test_code test_code)
+{
+	int retval;
+	struct syna_tcm_hcd *tcm_hcd = testing_hcd->tcm_hcd;
+
+	if (tcm_hcd->features.dual_firmware &&
+			tcm_hcd->id_info.mode != MODE_PRODUCTION_TEST) {
+		retval = tcm_hcd->switch_mode(tcm_hcd, FW_MODE_PRODUCTION_TEST);
+		if (retval < 0) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+					"Failed to run production test firmware\n");
+			return retval;
+		}
+	} else if (tcm_hcd->id_info.mode != MODE_APPLICATION ||
+			tcm_hcd->app_status != APP_STATUS_OK) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Application firmware not running\n");
+		return -ENODEV;
+	}
+
+	LOCK_BUFFER(testing_hcd->out);
+
+	/* Single-byte payload: the test item ID. */
+	retval = syna_tcm_alloc_mem(tcm_hcd,
+			&testing_hcd->out,
+			1);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to allocate memory for testing_hcd->out.buf\n");
+		UNLOCK_BUFFER(testing_hcd->out);
+		return retval;
+	}
+
+	testing_hcd->out.buf[0] = test_code;
+
+	LOCK_BUFFER(testing_hcd->resp);
+
+	retval = tcm_hcd->write_message(tcm_hcd,
+			CMD_PRODUCTION_TEST,
+			testing_hcd->out.buf,
+			1,
+			&testing_hcd->resp.buf,
+			&testing_hcd->resp.buf_size,
+			&testing_hcd->resp.data_length,
+			NULL,
+			0);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to write command %s\n",
+				STR(CMD_PRODUCTION_TEST));
+		UNLOCK_BUFFER(testing_hcd->resp);
+		UNLOCK_BUFFER(testing_hcd->out);
+		return retval;
+	}
+
+	UNLOCK_BUFFER(testing_hcd->resp);
+	UNLOCK_BUFFER(testing_hcd->out);
+
+	return 0;
+}
+
+/* Enable streaming of @report_type, wait for the report dispatcher to
+ * signal report_complete after num_of_reports frames (presumably done
+ * by this module's report callback, not visible in this hunk -- TODO
+ * confirm), then disable the report stream.  Returns 0 if all reports
+ * arrived in time, -EIO on timeout, or a negative errno on command
+ * failure.
+ */
+static int testing_collect_reports(enum report_type report_type,
+		unsigned int num_of_reports)
+{
+	int retval;
+	bool completed;
+	unsigned int timeout;
+	struct syna_tcm_hcd *tcm_hcd = testing_hcd->tcm_hcd;
+
+	testing_hcd->report_index = 0;
+	testing_hcd->report_type = report_type;
+	testing_hcd->num_of_reports = num_of_reports;
+
+	reinit_completion(&report_complete);
+
+	LOCK_BUFFER(testing_hcd->out);
+
+	retval = syna_tcm_alloc_mem(tcm_hcd,
+			&testing_hcd->out,
+			1);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to allocate memory for testing_hcd->out.buf\n");
+		UNLOCK_BUFFER(testing_hcd->out);
+		goto exit;
+	}
+
+	testing_hcd->out.buf[0] = testing_hcd->report_type;
+
+	LOCK_BUFFER(testing_hcd->resp);
+
+	retval = tcm_hcd->write_message(tcm_hcd,
+			CMD_ENABLE_REPORT,
+			testing_hcd->out.buf,
+			1,
+			&testing_hcd->resp.buf,
+			&testing_hcd->resp.buf_size,
+			&testing_hcd->resp.data_length,
+			NULL,
+			0);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to write command %s\n",
+				STR(CMD_ENABLE_REPORT));
+		UNLOCK_BUFFER(testing_hcd->resp);
+		UNLOCK_BUFFER(testing_hcd->out);
+		goto exit;
+	}
+
+	UNLOCK_BUFFER(testing_hcd->resp);
+	UNLOCK_BUFFER(testing_hcd->out);
+
+	completed = false;
+	/* Budget REPORT_TIMEOUT_MS per expected report. */
+	timeout = REPORT_TIMEOUT_MS * num_of_reports;
+
+	retval = wait_for_completion_timeout(&report_complete,
+			msecs_to_jiffies(timeout));
+	if (retval == 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Timed out waiting for report collection\n");
+	} else {
+		completed = true;
+	}
+
+	/* Always disable the report stream, even after a timeout. */
+	LOCK_BUFFER(testing_hcd->out);
+
+	testing_hcd->out.buf[0] = testing_hcd->report_type;
+
+	LOCK_BUFFER(testing_hcd->resp);
+
+	retval = tcm_hcd->write_message(tcm_hcd,
+			CMD_DISABLE_REPORT,
+			testing_hcd->out.buf,
+			1,
+			&testing_hcd->resp.buf,
+			&testing_hcd->resp.buf_size,
+			&testing_hcd->resp.data_length,
+			NULL,
+			0);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to write command %s\n",
+				STR(CMD_DISABLE_REPORT));
+		UNLOCK_BUFFER(testing_hcd->resp);
+		UNLOCK_BUFFER(testing_hcd->out);
+		goto exit;
+	}
+
+	UNLOCK_BUFFER(testing_hcd->resp);
+	UNLOCK_BUFFER(testing_hcd->out);
+
+	if (completed)
+		retval = 0;
+	else
+		retval = -EIO;
+
+exit:
+	/* Clear report_type so the dispatcher stops routing frames here. */
+	testing_hcd->report_type = 0;
+
+	return retval;
+}
+
+/* Compute the expected frame size in 16-bit words from the application
+ * info block: rows*cols image cells, plus (unless image_only) one word
+ * per hybrid row/column profile entry and one per button.
+ */
+static void testing_get_frame_size_words(unsigned int *size, bool image_only)
+{
+	unsigned int rows;
+	unsigned int cols;
+	unsigned int hybrid;
+	unsigned int buttons;
+	struct syna_tcm_app_info *app_info;
+	struct syna_tcm_hcd *tcm_hcd = testing_hcd->tcm_hcd;
+
+	app_info = &tcm_hcd->app_info;
+
+	rows = le2_to_uint(app_info->num_of_image_rows);
+	cols = le2_to_uint(app_info->num_of_image_cols);
+	hybrid = le2_to_uint(app_info->has_hybrid_data);
+	buttons = le2_to_uint(app_info->num_of_buttons);
+
+	*size = rows * cols;
+
+	if (!image_only) {
+		if (hybrid)
+			*size += rows + cols;
+		*size += buttons;
+	}
+}
+
+/* Format a doze-mode test result for the "data" sysfs attribute:
+ * a 2-byte header holding rows and cols (each truncated to one byte --
+ * assumes both fit in 0..255; TODO confirm against panel geometry),
+ * followed by the raw response.  On any failure the output buffer is
+ * simply left with its previous contents.
+ */
+static void testing_doze_frame_output(unsigned int rows, unsigned int cols)
+{
+	int retval;
+	unsigned int data_size;
+	unsigned int header_size;
+	unsigned int output_size;
+	struct syna_tcm_app_info *app_info;
+	struct syna_tcm_hcd *tcm_hcd = testing_hcd->tcm_hcd;
+
+	app_info = &tcm_hcd->app_info;
+
+	header_size = 2;
+
+	data_size = rows * cols;
+
+	/* One extra word of button data when buttons are present. */
+	if (le2_to_uint(app_info->num_of_buttons))
+		data_size++;
+
+	output_size = header_size + data_size * 2;
+
+	LOCK_BUFFER(testing_hcd->output);
+
+	retval = syna_tcm_alloc_mem(tcm_hcd,
+			&testing_hcd->output,
+			output_size);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to allocate memory for testing_hcd->output.buf\n");
+		UNLOCK_BUFFER(testing_hcd->output);
+		return;
+	}
+
+	testing_hcd->output.buf[0] = rows;
+	testing_hcd->output.buf[1] = cols;
+
+	output_size = header_size;
+
+	LOCK_BUFFER(testing_hcd->resp);
+
+	retval = secure_memcpy(testing_hcd->output.buf + header_size,
+			testing_hcd->output.buf_size - header_size,
+			testing_hcd->resp.buf,
+			testing_hcd->resp.buf_size,
+			testing_hcd->resp.data_length);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to copy test data\n");
+		UNLOCK_BUFFER(testing_hcd->resp);
+		UNLOCK_BUFFER(testing_hcd->output);
+		return;
+	}
+
+	output_size += testing_hcd->resp.data_length;
+
+	UNLOCK_BUFFER(testing_hcd->resp);
+
+	testing_hcd->output.data_length = output_size;
+
+	UNLOCK_BUFFER(testing_hcd->output);
+}
+
+/* Format a standard full-frame test result for the "data" sysfs
+ * attribute: a header copied verbatim from the app-info block
+ * (num_of_buttons, image rows/cols, hybrid flag -- assumes these four
+ * fields are contiguous in struct syna_tcm_app_info; TODO confirm),
+ * followed by the raw response.  On failure the output buffer keeps
+ * its previous contents.
+ */
+static void testing_standard_frame_output(bool image_only)
+{
+	int retval;
+	unsigned int data_size;
+	unsigned int header_size;
+	unsigned int output_size;
+	struct syna_tcm_app_info *app_info;
+	struct syna_tcm_hcd *tcm_hcd = testing_hcd->tcm_hcd;
+
+	app_info = &tcm_hcd->app_info;
+
+	testing_get_frame_size_words(&data_size, image_only);
+
+	header_size = sizeof(app_info->num_of_buttons) +
+			sizeof(app_info->num_of_image_rows) +
+			sizeof(app_info->num_of_image_cols) +
+			sizeof(app_info->has_hybrid_data);
+
+	output_size = header_size + data_size * 2;
+
+	LOCK_BUFFER(testing_hcd->output);
+
+	retval = syna_tcm_alloc_mem(tcm_hcd,
+			&testing_hcd->output,
+			output_size);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to allocate memory for testing_hcd->output.buf\n");
+		UNLOCK_BUFFER(testing_hcd->output);
+		return;
+	}
+
+	retval = secure_memcpy(testing_hcd->output.buf,
+			testing_hcd->output.buf_size,
+			&app_info->num_of_buttons[0],
+			header_size,
+			header_size);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to copy header data\n");
+		UNLOCK_BUFFER(testing_hcd->output);
+		return;
+	}
+
+	output_size = header_size;
+
+	LOCK_BUFFER(testing_hcd->resp);
+
+	retval = secure_memcpy(testing_hcd->output.buf + header_size,
+			testing_hcd->output.buf_size - header_size,
+			testing_hcd->resp.buf,
+			testing_hcd->resp.buf_size,
+			testing_hcd->resp.data_length);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to copy test data\n");
+		UNLOCK_BUFFER(testing_hcd->resp);
+		UNLOCK_BUFFER(testing_hcd->output);
+		return;
+	}
+
+	output_size += testing_hcd->resp.data_length;
+
+	UNLOCK_BUFFER(testing_hcd->resp);
+
+	testing_hcd->output.data_length = output_size;
+
+	UNLOCK_BUFFER(testing_hcd->output);
+}
+
+/* Dynamic range test in doze mode: run the firmware test, derive the
+ * (reduced) row count from the response size, check every cell against
+ * the drt_hi/drt_lo limit tables, and publish the frame via
+ * testing_doze_frame_output().  testing_hcd->result holds the verdict;
+ * the return value only reports execution errors.
+ */
+static int testing_dynamic_range_doze(void)
+{
+	int retval;
+	unsigned char *buf;
+	unsigned int idx;
+	unsigned int row;
+	unsigned int col;
+	unsigned int data;
+	unsigned int rows;
+	unsigned int cols;
+	unsigned int data_size;
+	unsigned int limits_rows;
+	unsigned int limits_cols;
+	struct syna_tcm_app_info *app_info;
+	struct syna_tcm_hcd *tcm_hcd = testing_hcd->tcm_hcd;
+
+	app_info = &tcm_hcd->app_info;
+
+	cols = le2_to_uint(app_info->num_of_image_cols);
+
+	retval = testing_run_prod_test_item(TEST_DYNAMIC_RANGE_DOZE);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to run test\n");
+		goto exit;
+	}
+
+	LOCK_BUFFER(testing_hcd->resp);
+
+	/* Response is 16-bit words; optionally one trailing button word. */
+	data_size = testing_hcd->resp.data_length / 2;
+
+	if (le2_to_uint(app_info->num_of_buttons))
+		data_size--;
+
+	if (data_size % cols) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Invalid max number of rows per burst\n");
+		UNLOCK_BUFFER(testing_hcd->resp);
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	rows = data_size / cols;
+
+	limits_rows = ARRAY_SIZE(drt_hi_limits);
+	limits_cols = ARRAY_SIZE(drt_hi_limits[0]);
+
+	if (rows > limits_rows || cols > limits_cols) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Mismatching limits data\n");
+		UNLOCK_BUFFER(testing_hcd->resp);
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	limits_rows = ARRAY_SIZE(drt_lo_limits);
+	limits_cols = ARRAY_SIZE(drt_lo_limits[0]);
+
+	if (rows > limits_rows || cols > limits_cols) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Mismatching limits data\n");
+		UNLOCK_BUFFER(testing_hcd->resp);
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	idx = 0;
+	buf = testing_hcd->resp.buf;
+	testing_hcd->result = true;
+
+	for (row = 0; row < rows; row++) {
+		for (col = 0; col < cols; col++) {
+			data = le2_to_uint(&buf[idx * 2]);
+			if (data > drt_hi_limits[row][col] ||
+					data < drt_lo_limits[row][col]) {
+				testing_hcd->result = false;
+				break;
+			}
+			idx++;
+		}
+	}
+
+	UNLOCK_BUFFER(testing_hcd->resp);
+
+	testing_doze_frame_output(rows, cols);
+
+	retval = 0;
+
+exit:
+	/* Dual-firmware parts were switched to test firmware; reset back. */
+	if (tcm_hcd->features.dual_firmware) {
+		if (tcm_hcd->reset(tcm_hcd, false, true) < 0) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+					"Failed to do reset\n");
+		}
+	}
+
+	return retval;
+}
+
+/* Dynamic range test in low-power wakeup-gesture mode: enable LPWG,
+ * run the normal dynamic range test, then restore normal mode.
+ * Note: if the test itself fails, LPWG mode is left enabled.
+ */
+static int testing_dynamic_range_lpwg(void)
+{
+	int retval;
+	struct syna_tcm_hcd *tcm_hcd = testing_hcd->tcm_hcd;
+
+	retval = tcm_hcd->set_dynamic_config(tcm_hcd,
+			DC_IN_WAKEUP_GESTURE_MODE,
+			1);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to enable wakeup gesture mode\n");
+		return retval;
+	}
+
+	retval = testing_dynamic_range();
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to do dynamic range test\n");
+		return retval;
+	}
+
+	retval = tcm_hcd->set_dynamic_config(tcm_hcd,
+			DC_IN_WAKEUP_GESTURE_MODE,
+			0);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to disable wakeup gesture mode\n");
+		return retval;
+	}
+
+	return 0;
+}
+
+/* Full-frame dynamic range test: run the firmware test, verify the
+ * response matches the expected frame size, then check every image cell
+ * against the drt_hi/drt_lo limit tables.  The pass/fail verdict goes
+ * to testing_hcd->result; the formatted frame to testing_hcd->output.
+ */
+static int testing_dynamic_range(void)
+{
+	int retval;
+	unsigned char *buf;
+	unsigned int idx;
+	unsigned int row;
+	unsigned int col;
+	unsigned int data;
+	unsigned int rows;
+	unsigned int cols;
+	unsigned int limits_rows;
+	unsigned int limits_cols;
+	unsigned int frame_size_words;
+	struct syna_tcm_app_info *app_info;
+	struct syna_tcm_hcd *tcm_hcd = testing_hcd->tcm_hcd;
+
+	app_info = &tcm_hcd->app_info;
+
+	rows = le2_to_uint(app_info->num_of_image_rows);
+	cols = le2_to_uint(app_info->num_of_image_cols);
+
+	testing_get_frame_size_words(&frame_size_words, false);
+
+	retval = testing_run_prod_test_item(TEST_DYNAMIC_RANGE);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to run test\n");
+		goto exit;
+	}
+
+	LOCK_BUFFER(testing_hcd->resp);
+
+	if (frame_size_words != testing_hcd->resp.data_length / 2) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Frame size mismatch\n");
+		UNLOCK_BUFFER(testing_hcd->resp);
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	limits_rows = ARRAY_SIZE(drt_hi_limits);
+	limits_cols = ARRAY_SIZE(drt_hi_limits[0]);
+
+	if (rows > limits_rows || cols > limits_cols) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Mismatching limits data\n");
+		UNLOCK_BUFFER(testing_hcd->resp);
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	limits_rows = ARRAY_SIZE(drt_lo_limits);
+	limits_cols = ARRAY_SIZE(drt_lo_limits[0]);
+
+	if (rows > limits_rows || cols > limits_cols) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Mismatching limits data\n");
+		UNLOCK_BUFFER(testing_hcd->resp);
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	idx = 0;
+	buf = testing_hcd->resp.buf;
+	testing_hcd->result = true;
+
+	for (row = 0; row < rows; row++) {
+		for (col = 0; col < cols; col++) {
+			data = le2_to_uint(&buf[idx * 2]);
+			if (data > drt_hi_limits[row][col] ||
+					data < drt_lo_limits[row][col]) {
+				testing_hcd->result = false;
+				break;
+			}
+			idx++;
+		}
+	}
+
+	UNLOCK_BUFFER(testing_hcd->resp);
+
+	testing_standard_frame_output(false);
+
+	retval = 0;
+
+exit:
+	/* Dual-firmware parts were switched to test firmware; reset back. */
+	if (tcm_hcd->features.dual_firmware) {
+		if (tcm_hcd->reset(tcm_hcd, false, true) < 0) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+					"Failed to do reset\n");
+		}
+	}
+
+	return retval;
+}
+
+/* Noise test in doze mode: like testing_dynamic_range_doze() but with a
+ * signed per-cell reading compared against an upper bound only
+ * (noise_limits).  Verdict in testing_hcd->result.
+ */
+static int testing_noise_doze(void)
+{
+	int retval;
+	short data;
+	unsigned char *buf;
+	unsigned int idx;
+	unsigned int row;
+	unsigned int col;
+	unsigned int rows;
+	unsigned int cols;
+	unsigned int data_size;
+	unsigned int limits_rows;
+	unsigned int limits_cols;
+	struct syna_tcm_app_info *app_info;
+	struct syna_tcm_hcd *tcm_hcd = testing_hcd->tcm_hcd;
+
+	app_info = &tcm_hcd->app_info;
+
+	cols = le2_to_uint(app_info->num_of_image_cols);
+
+	retval = testing_run_prod_test_item(TEST_NOISE_DOZE);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to run test\n");
+		goto exit;
+	}
+
+	LOCK_BUFFER(testing_hcd->resp);
+
+	data_size = testing_hcd->resp.data_length / 2;
+
+	if (le2_to_uint(app_info->num_of_buttons))
+		data_size--;
+
+	if (data_size % cols) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Invalid max number of rows per burst\n");
+		UNLOCK_BUFFER(testing_hcd->resp);
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	rows = data_size / cols;
+
+	limits_rows = ARRAY_SIZE(noise_limits);
+	limits_cols = ARRAY_SIZE(noise_limits[0]);
+
+	if (rows > limits_rows || cols > limits_cols) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Mismatching limits data\n");
+		UNLOCK_BUFFER(testing_hcd->resp);
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	idx = 0;
+	buf = testing_hcd->resp.buf;
+	testing_hcd->result = true;
+
+	for (row = 0; row < rows; row++) {
+		for (col = 0; col < cols; col++) {
+			data = (short)le2_to_uint(&buf[idx * 2]);
+			if (data > noise_limits[row][col]) {
+				testing_hcd->result = false;
+				break;
+			}
+			idx++;
+		}
+	}
+
+	UNLOCK_BUFFER(testing_hcd->resp);
+
+	testing_doze_frame_output(rows, cols);
+
+	retval = 0;
+
+exit:
+	/* Dual-firmware parts were switched to test firmware; reset back. */
+	if (tcm_hcd->features.dual_firmware) {
+		if (tcm_hcd->reset(tcm_hcd, false, true) < 0) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+					"Failed to do reset\n");
+		}
+	}
+
+	return retval;
+}
+
+/* Noise test in low-power wakeup-gesture mode: enable LPWG, run the
+ * normal noise test, then restore normal mode.  Note: if the test
+ * itself fails, LPWG mode is left enabled.
+ */
+static int testing_noise_lpwg(void)
+{
+	int retval;
+	struct syna_tcm_hcd *tcm_hcd = testing_hcd->tcm_hcd;
+
+	retval = tcm_hcd->set_dynamic_config(tcm_hcd,
+			DC_IN_WAKEUP_GESTURE_MODE,
+			1);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to enable wakeup gesture mode\n");
+		return retval;
+	}
+
+	retval = testing_noise();
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to do noise test\n");
+		return retval;
+	}
+
+	retval = tcm_hcd->set_dynamic_config(tcm_hcd,
+			DC_IN_WAKEUP_GESTURE_MODE,
+			0);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to disable wakeup gesture mode\n");
+		return retval;
+	}
+
+	return 0;
+}
+
+/* Full-frame noise test: run the firmware test, verify the response
+ * size, then check each signed cell reading against the noise_limits
+ * upper-bound table.  Verdict in testing_hcd->result; formatted frame
+ * in testing_hcd->output.
+ */
+static int testing_noise(void)
+{
+	int retval;
+	short data;
+	unsigned char *buf;
+	unsigned int idx;
+	unsigned int row;
+	unsigned int col;
+	unsigned int rows;
+	unsigned int cols;
+	unsigned int limits_rows;
+	unsigned int limits_cols;
+	unsigned int frame_size_words;
+	struct syna_tcm_app_info *app_info;
+	struct syna_tcm_hcd *tcm_hcd = testing_hcd->tcm_hcd;
+
+	app_info = &tcm_hcd->app_info;
+
+	rows = le2_to_uint(app_info->num_of_image_rows);
+	cols = le2_to_uint(app_info->num_of_image_cols);
+
+	testing_get_frame_size_words(&frame_size_words, false);
+
+	retval = testing_run_prod_test_item(TEST_NOISE);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to run test\n");
+		goto exit;
+	}
+
+	LOCK_BUFFER(testing_hcd->resp);
+
+	if (frame_size_words != testing_hcd->resp.data_length / 2) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Frame size mismatch\n");
+		UNLOCK_BUFFER(testing_hcd->resp);
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	limits_rows = ARRAY_SIZE(noise_limits);
+	limits_cols = ARRAY_SIZE(noise_limits[0]);
+
+	if (rows > limits_rows || cols > limits_cols) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Mismatching limits data\n");
+		UNLOCK_BUFFER(testing_hcd->resp);
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	idx = 0;
+	buf = testing_hcd->resp.buf;
+	testing_hcd->result = true;
+
+	for (row = 0; row < rows; row++) {
+		for (col = 0; col < cols; col++) {
+			data = (short)le2_to_uint(&buf[idx * 2]);
+			if (data > noise_limits[row][col]) {
+				testing_hcd->result = false;
+				break;
+			}
+			idx++;
+		}
+	}
+
+	UNLOCK_BUFFER(testing_hcd->resp);
+
+	testing_standard_frame_output(false);
+
+	retval = 0;
+
+exit:
+	/* Dual-firmware parts were switched to test firmware; reset back. */
+	if (tcm_hcd->features.dual_firmware) {
+		if (tcm_hcd->reset(tcm_hcd, false, true) < 0) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+					"Failed to do reset\n");
+		}
+	}
+
+	return retval;
+}
+
+/* Format the open/short detector result for the "data" sysfs attribute:
+ * app-info header followed by the raw response, which holds two packed
+ * bitmaps of (rows*cols+7)/8 bytes each.  On failure the output buffer
+ * keeps its previous contents.
+ */
+static void testing_open_short_detector_output(void)
+{
+	int retval;
+	unsigned int rows;
+	unsigned int cols;
+	unsigned int data_size;
+	unsigned int header_size;
+	unsigned int output_size;
+	struct syna_tcm_app_info *app_info;
+	struct syna_tcm_hcd *tcm_hcd = testing_hcd->tcm_hcd;
+
+	app_info = &tcm_hcd->app_info;
+
+	rows = le2_to_uint(app_info->num_of_image_rows);
+	cols = le2_to_uint(app_info->num_of_image_cols);
+	data_size = (rows * cols + 7) / 8;
+
+	header_size = sizeof(app_info->num_of_buttons) +
+			sizeof(app_info->num_of_image_rows) +
+			sizeof(app_info->num_of_image_cols) +
+			sizeof(app_info->has_hybrid_data);
+
+	output_size = header_size + data_size * 2;
+
+	LOCK_BUFFER(testing_hcd->output);
+
+	retval = syna_tcm_alloc_mem(tcm_hcd,
+			&testing_hcd->output,
+			output_size);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+			"Failed to allocate memory for output.buf\n");
+		UNLOCK_BUFFER(testing_hcd->output);
+		return;
+	}
+
+	retval = secure_memcpy(testing_hcd->output.buf,
+			testing_hcd->output.buf_size,
+			&app_info->num_of_buttons[0],
+			header_size,
+			header_size);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to copy header data\n");
+		UNLOCK_BUFFER(testing_hcd->output);
+		return;
+	}
+
+	output_size = header_size;
+
+	LOCK_BUFFER(testing_hcd->resp);
+
+	retval = secure_memcpy(testing_hcd->output.buf + header_size,
+			testing_hcd->output.buf_size - header_size,
+			testing_hcd->resp.buf,
+			testing_hcd->resp.buf_size,
+			testing_hcd->resp.data_length);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to copy test data\n");
+		UNLOCK_BUFFER(testing_hcd->resp);
+		UNLOCK_BUFFER(testing_hcd->output);
+		return;
+	}
+
+	output_size += testing_hcd->resp.data_length;
+
+	UNLOCK_BUFFER(testing_hcd->resp);
+
+	testing_hcd->output.data_length = output_size;
+
+	UNLOCK_BUFFER(testing_hcd->output);
+}
+
+/* Open/short detector test: the response carries two packed bitmaps of
+ * (rows*cols+7)/8 bytes; any set bit marks a defective cell and fails
+ * the test.  Verdict in testing_hcd->result; the device is always reset
+ * afterwards because this test disrupts normal operation.
+ *
+ * Fix: the bit cursor previously advanced with "if (bit++ > 7)", which
+ * lets bit reach 8 before wrapping, so every 9th iteration tested a
+ * phantom bit 8 (always 0 for a byte value) and the tail of each bitmap
+ * was never examined.  Using ">= 7" wraps after bit 7 and walks every
+ * bit exactly once.
+ */
+static int testing_open_short_detector(void)
+{
+	int retval;
+	unsigned int bit;
+	unsigned int byte;
+	unsigned int row;
+	unsigned int col;
+	unsigned int rows;
+	unsigned int cols;
+	unsigned int data_size;
+	unsigned char *data;
+	struct syna_tcm_app_info *app_info;
+	struct syna_tcm_hcd *tcm_hcd = testing_hcd->tcm_hcd;
+
+	app_info = &tcm_hcd->app_info;
+
+	rows = le2_to_uint(app_info->num_of_image_rows);
+	cols = le2_to_uint(app_info->num_of_image_cols);
+	data_size = (rows * cols + 7) / 8;
+
+	retval = testing_run_prod_test_item(TEST_OPEN_SHORT_DETECTOR);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to run test\n");
+		goto exit;
+	}
+
+	LOCK_BUFFER(testing_hcd->resp);
+
+	if (data_size * 2 != testing_hcd->resp.data_length) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Data size mismatch\n");
+		UNLOCK_BUFFER(testing_hcd->resp);
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	testing_hcd->result = true;
+
+	/* First bitmap. */
+	bit = 0;
+	byte = 0;
+	data = &testing_hcd->resp.buf[0];
+	for (row = 0; row < rows; row++) {
+		for (col = 0; col < cols; col++) {
+			if (data[byte] & (1 << bit)) {
+				testing_hcd->result = false;
+				break;
+			}
+			if (bit++ >= 7) {
+				bit = 0;
+				byte++;
+			}
+		}
+	}
+
+	/* Second bitmap, only worth scanning if still passing. */
+	if (testing_hcd->result == true) {
+		bit = 0;
+		byte = 0;
+		data = &testing_hcd->resp.buf[data_size];
+		for (row = 0; row < rows; row++) {
+			for (col = 0; col < cols; col++) {
+				if (data[byte] & (1 << bit)) {
+					testing_hcd->result = false;
+					break;
+				}
+				if (bit++ >= 7) {
+					bit = 0;
+					byte++;
+				}
+			}
+		}
+	}
+
+	UNLOCK_BUFFER(testing_hcd->resp);
+
+	testing_open_short_detector_output();
+
+	retval = 0;
+
+exit:
+	if (tcm_hcd->reset(tcm_hcd, false, true) < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to do reset\n");
+	}
+
+	return retval;
+}
+
+/* Production test PT11: image-only frame checked against the
+ * pt11_hi/pt11_lo signed limit tables.  Verdict in testing_hcd->result;
+ * formatted frame in testing_hcd->output.
+ */
+static int testing_pt11(void)
+{
+	int retval;
+	short data;
+	unsigned char *buf;
+	unsigned int idx;
+	unsigned int row;
+	unsigned int col;
+	unsigned int rows;
+	unsigned int cols;
+	unsigned int limits_rows;
+	unsigned int limits_cols;
+	unsigned int image_size_words;
+	struct syna_tcm_app_info *app_info;
+	struct syna_tcm_hcd *tcm_hcd = testing_hcd->tcm_hcd;
+
+	app_info = &tcm_hcd->app_info;
+
+	rows = le2_to_uint(app_info->num_of_image_rows);
+	cols = le2_to_uint(app_info->num_of_image_cols);
+
+	testing_get_frame_size_words(&image_size_words, true);
+
+	retval = testing_run_prod_test_item(TEST_PT11);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to run test\n");
+		goto exit;
+	}
+
+	LOCK_BUFFER(testing_hcd->resp);
+
+	if (image_size_words != testing_hcd->resp.data_length / 2) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Image size mismatch\n");
+		UNLOCK_BUFFER(testing_hcd->resp);
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	limits_rows = ARRAY_SIZE(pt11_hi_limits);
+	limits_cols = ARRAY_SIZE(pt11_hi_limits[0]);
+
+	if (rows > limits_rows || cols > limits_cols) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Mismatching limits data\n");
+		UNLOCK_BUFFER(testing_hcd->resp);
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	limits_rows = ARRAY_SIZE(pt11_lo_limits);
+	limits_cols = ARRAY_SIZE(pt11_lo_limits[0]);
+
+	if (rows > limits_rows || cols > limits_cols) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Mismatching limits data\n");
+		UNLOCK_BUFFER(testing_hcd->resp);
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	idx = 0;
+	buf = testing_hcd->resp.buf;
+	testing_hcd->result = true;
+
+	for (row = 0; row < rows; row++) {
+		for (col = 0; col < cols; col++) {
+			data = (short)le2_to_uint(&buf[idx * 2]);
+			if (data > pt11_hi_limits[row][col] ||
+					data < pt11_lo_limits[row][col]) {
+				testing_hcd->result = false;
+				break;
+			}
+			idx++;
+		}
+	}
+
+	UNLOCK_BUFFER(testing_hcd->resp);
+
+	testing_standard_frame_output(true);
+
+	retval = 0;
+
+exit:
+	/* Dual-firmware parts were switched to test firmware; reset back. */
+	if (tcm_hcd->features.dual_firmware) {
+		if (tcm_hcd->reset(tcm_hcd, false, true) < 0) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+					"Failed to do reset\n");
+		}
+	}
+
+	return retval;
+}
+
+/* Production test PT12: image-only frame checked against the
+ * pt12_limits lower-bound table (signed readings).  Verdict in
+ * testing_hcd->result; formatted frame in testing_hcd->output.
+ */
+static int testing_pt12(void)
+{
+	int retval;
+	short data;
+	unsigned char *buf;
+	unsigned int idx;
+	unsigned int row;
+	unsigned int col;
+	unsigned int rows;
+	unsigned int cols;
+	unsigned int limits_rows;
+	unsigned int limits_cols;
+	unsigned int image_size_words;
+	struct syna_tcm_app_info *app_info;
+	struct syna_tcm_hcd *tcm_hcd = testing_hcd->tcm_hcd;
+
+	app_info = &tcm_hcd->app_info;
+
+	rows = le2_to_uint(app_info->num_of_image_rows);
+	cols = le2_to_uint(app_info->num_of_image_cols);
+
+	testing_get_frame_size_words(&image_size_words, true);
+
+	retval = testing_run_prod_test_item(TEST_PT12);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to run test\n");
+		goto exit;
+	}
+
+	LOCK_BUFFER(testing_hcd->resp);
+
+	if (image_size_words != testing_hcd->resp.data_length / 2) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Image size mismatch\n");
+		UNLOCK_BUFFER(testing_hcd->resp);
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	limits_rows = ARRAY_SIZE(pt12_limits);
+	limits_cols = ARRAY_SIZE(pt12_limits[0]);
+
+	if (rows > limits_rows || cols > limits_cols) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Mismatching limits data\n");
+		UNLOCK_BUFFER(testing_hcd->resp);
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	idx = 0;
+	buf = testing_hcd->resp.buf;
+	testing_hcd->result = true;
+
+	for (row = 0; row < rows; row++) {
+		for (col = 0; col < cols; col++) {
+			data = (short)le2_to_uint(&buf[idx * 2]);
+			if (data < pt12_limits[row][col]) {
+				testing_hcd->result = false;
+				break;
+			}
+			idx++;
+		}
+	}
+
+	UNLOCK_BUFFER(testing_hcd->resp);
+
+	testing_standard_frame_output(true);
+
+	retval = 0;
+
+exit:
+	/* Dual-firmware parts were switched to test firmware; reset back. */
+	if (tcm_hcd->features.dual_firmware) {
+		if (tcm_hcd->reset(tcm_hcd, false, true) < 0) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+					"Failed to do reset\n");
+		}
+	}
+
+	return retval;
+}
+
+/**
+ * testing_pt13() - Run production test PT13 and verify the result
+ *
+ * Run test item PT13, verify that the response size matches the
+ * expected image frame size, then compare every image cell against
+ * pt13_limits.  The pass/fail outcome is stored in testing_hcd->result
+ * and the raw frame is exported through testing_standard_frame_output().
+ *
+ * Return: 0 on success (regardless of the pass/fail outcome), negative
+ * error code otherwise.
+ */
+static int testing_pt13(void)
+{
+	int retval;
+	short data;
+	unsigned char *buf;
+	unsigned int idx;
+	unsigned int row;
+	unsigned int col;
+	unsigned int rows;
+	unsigned int cols;
+	unsigned int limits_rows;
+	unsigned int limits_cols;
+	unsigned int image_size_words;
+	struct syna_tcm_app_info *app_info;
+	struct syna_tcm_hcd *tcm_hcd = testing_hcd->tcm_hcd;
+
+	app_info = &tcm_hcd->app_info;
+
+	rows = le2_to_uint(app_info->num_of_image_rows);
+	cols = le2_to_uint(app_info->num_of_image_cols);
+
+	testing_get_frame_size_words(&image_size_words, true);
+
+	retval = testing_run_prod_test_item(TEST_PT13);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to run test\n");
+		goto exit;
+	}
+
+	LOCK_BUFFER(testing_hcd->resp);
+
+	if (image_size_words != testing_hcd->resp.data_length / 2) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Image size mismatch\n");
+		UNLOCK_BUFFER(testing_hcd->resp);
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	limits_rows = ARRAY_SIZE(pt13_limits);
+	limits_cols = ARRAY_SIZE(pt13_limits[0]);
+
+	if (rows > limits_rows || cols > limits_cols) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Mismatching limits data\n");
+		UNLOCK_BUFFER(testing_hcd->resp);
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	/*
+	 * The comparison loop below reads rows * cols 16-bit words from
+	 * the response buffer, which only holds image_size_words words;
+	 * reject inconsistent sizes to avoid an out of bound read.
+	 */
+	if (rows * cols > image_size_words) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Mismatching image size\n");
+		UNLOCK_BUFFER(testing_hcd->resp);
+		retval = -EINVAL;
+		goto exit;
+	}
+
+	idx = 0;
+	buf = testing_hcd->resp.buf;
+	testing_hcd->result = true;
+
+	for (row = 0; row < rows; row++) {
+		for (col = 0; col < cols; col++) {
+			data = (short)le2_to_uint(&buf[idx * 2]);
+			if (data < pt13_limits[row][col]) {
+				testing_hcd->result = false;
+				break;
+			}
+			idx++;
+		}
+	}
+
+	UNLOCK_BUFFER(testing_hcd->resp);
+
+	testing_standard_frame_output(true);
+
+	retval = 0;
+
+exit:
+	/* dual-firmware parts need a reset to leave production test mode */
+	if (tcm_hcd->features.dual_firmware) {
+		if (tcm_hcd->reset(tcm_hcd, false, true) < 0) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+					"Failed to do reset\n");
+		}
+	}
+
+	return retval;
+}
+
+/**
+ * testing_reset_open() - Check the reset reason reported after a
+ * hardware reset
+ *
+ * Toggle the reset GPIO, switch to bootloader mode if needed, and
+ * compare boot_info.last_reset_reason against reset_open_limit to set
+ * testing_hcd->result.  Application firmware is restored before
+ * returning.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+static int testing_reset_open(void)
+{
+	int retval;
+	struct syna_tcm_hcd *tcm_hcd = testing_hcd->tcm_hcd;
+	const struct syna_tcm_board_data *bdata = tcm_hcd->hw_if->bdata;
+
+	if (bdata->reset_gpio < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Hardware reset unavailable\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&tcm_hcd->reset_mutex);
+
+	/* keep the watchdog quiet while the chip is held in reset */
+	tcm_hcd->update_watchdog(tcm_hcd, false);
+
+	gpio_set_value(bdata->reset_gpio, bdata->reset_on_state);
+	msleep(bdata->reset_active_ms);
+	gpio_set_value(bdata->reset_gpio, !bdata->reset_on_state);
+	msleep(bdata->reset_delay_ms);
+
+	tcm_hcd->update_watchdog(tcm_hcd, true);
+
+	mutex_unlock(&tcm_hcd->reset_mutex);
+
+	if (tcm_hcd->id_info.mode == MODE_APPLICATION) {
+		retval = tcm_hcd->switch_mode(tcm_hcd, FW_MODE_BOOTLOADER);
+		if (retval < 0) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+					"Failed to enter bootloader mode\n");
+			return retval;
+		}
+	} else {
+		/* already in bootloader mode; refresh the boot info */
+		retval = tcm_hcd->identify(tcm_hcd, false);
+		if (retval < 0) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+					"Failed to do identification\n");
+			goto run_app_firmware;
+		}
+	}
+
+	if (tcm_hcd->boot_info.last_reset_reason == reset_open_limit)
+		testing_hcd->result = true;
+	else
+		testing_hcd->result = false;
+
+	retval = 0;
+
+run_app_firmware:
+	/* best effort: always try to restore application firmware */
+	if (tcm_hcd->switch_mode(tcm_hcd, FW_MODE_APPLICATION) < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to run application firmware\n");
+	}
+
+	return retval;
+}
+
+/*
+ * Copy the lockdown data held in testing_hcd->resp into
+ * testing_hcd->output so it can be retrieved through sysfs.  On any
+ * failure the output buffer is simply left unchanged.
+ */
+static void testing_lockdown_output(void)
+{
+	int ret;
+	unsigned int length;
+	struct syna_tcm_hcd *tcm_hcd = testing_hcd->tcm_hcd;
+
+	LOCK_BUFFER(testing_hcd->output);
+	LOCK_BUFFER(testing_hcd->resp);
+
+	length = testing_hcd->resp.data_length;
+
+	ret = syna_tcm_alloc_mem(tcm_hcd, &testing_hcd->output, length);
+	if (ret < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+			"Failed to allocate memory for output.buf\n");
+		goto unlock;
+	}
+
+	ret = secure_memcpy(testing_hcd->output.buf,
+			testing_hcd->output.buf_size,
+			testing_hcd->resp.buf,
+			testing_hcd->resp.buf_size,
+			length);
+	if (ret < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to copy test data\n");
+		goto unlock;
+	}
+
+	/* record the valid payload length only after a successful copy */
+	testing_hcd->output.data_length = length;
+
+unlock:
+	UNLOCK_BUFFER(testing_hcd->resp);
+	UNLOCK_BUFFER(testing_hcd->output);
+}
+
+/**
+ * testing_lockdown() - Compare the lockdown (OTP) data against limits
+ *
+ * Read the lockdown data from flash and compare it byte for byte
+ * against lockdown_limits.  The pass/fail outcome is stored in
+ * testing_hcd->result and the raw data is exported through
+ * testing_lockdown_output().
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+static int testing_lockdown(void)
+{
+	int retval;
+	unsigned int idx;
+	unsigned int lockdown_size;
+	unsigned int limits_size;
+	struct syna_tcm_hcd *tcm_hcd = testing_hcd->tcm_hcd;
+
+	if (tcm_hcd->read_flash_data == NULL) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Unable to read from flash\n");
+		return -EINVAL;
+	}
+
+	LOCK_BUFFER(testing_hcd->resp);
+
+	retval = tcm_hcd->read_flash_data(CUSTOM_OTP, true, &testing_hcd->resp);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to read lockdown data\n");
+		UNLOCK_BUFFER(testing_hcd->resp);
+		return retval;
+	}
+
+	lockdown_size = testing_hcd->resp.data_length;
+
+	limits_size = sizeof(lockdown_limits) / sizeof(*lockdown_limits);
+
+	/* an exact size match also bounds the comparison loop below */
+	if (lockdown_size != limits_size) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Mismatching limits data\n");
+		UNLOCK_BUFFER(testing_hcd->resp);
+		return -EINVAL;
+	}
+
+	testing_hcd->result = true;
+
+	for (idx = 0; idx < lockdown_size; idx++) {
+		if (testing_hcd->resp.buf[idx] != lockdown_limits[idx]) {
+			testing_hcd->result = false;
+			break;
+		}
+	}
+
+	UNLOCK_BUFFER(testing_hcd->resp);
+
+	testing_lockdown_output();
+
+	return 0;
+}
+
+/*
+ * Copy the TRX test data held in testing_hcd->resp into
+ * testing_hcd->output so it can be retrieved through sysfs.  On any
+ * failure the output buffer is simply left unchanged.
+ */
+static void testing_trx_output(void)
+{
+	int ret;
+	unsigned int length;
+	struct syna_tcm_hcd *tcm_hcd = testing_hcd->tcm_hcd;
+
+	LOCK_BUFFER(testing_hcd->output);
+	LOCK_BUFFER(testing_hcd->resp);
+
+	length = testing_hcd->resp.data_length;
+
+	ret = syna_tcm_alloc_mem(tcm_hcd, &testing_hcd->output, length);
+	if (ret < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+			"Failed to allocate memory for output.buf\n");
+		goto unlock;
+	}
+
+	ret = secure_memcpy(testing_hcd->output.buf,
+			testing_hcd->output.buf_size,
+			testing_hcd->resp.buf,
+			testing_hcd->resp.buf_size,
+			length);
+	if (ret < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to copy test data\n");
+		goto unlock;
+	}
+
+	/* record the valid payload length only after a successful copy */
+	testing_hcd->output.data_length = length;
+
+unlock:
+	UNLOCK_BUFFER(testing_hcd->resp);
+	UNLOCK_BUFFER(testing_hcd->output);
+}
+
+/**
+ * testing_trx() - Run a TRX shorts/opens production test
+ * @test_code: TEST_TRX_TRX_SHORTS, TEST_TRX_GROUND_SHORTS or
+ *             TEST_TRX_SENSOR_OPENS
+ *
+ * Every byte of the test response must equal the expected pass vector
+ * for the test to pass; the outcome is stored in testing_hcd->result
+ * and the raw data is exported through testing_trx_output().
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+static int testing_trx(enum test_code test_code)
+{
+	int retval;
+	unsigned char pass_vector;
+	unsigned int idx;
+	struct syna_tcm_hcd *tcm_hcd = testing_hcd->tcm_hcd;
+
+	/* shorts tests report all-ones on pass, opens test all-zeroes */
+	switch (test_code) {
+	case TEST_TRX_TRX_SHORTS:
+	case TEST_TRX_GROUND_SHORTS:
+		pass_vector = 0xff;
+		break;
+	case TEST_TRX_SENSOR_OPENS:
+		pass_vector = 0x00;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	retval = testing_run_prod_test_item(test_code);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to run test\n");
+		goto exit;
+	}
+
+	LOCK_BUFFER(testing_hcd->resp);
+
+	testing_hcd->result = true;
+
+	for (idx = 0; idx < testing_hcd->resp.data_length; idx++) {
+		if (testing_hcd->resp.buf[idx] != pass_vector) {
+			testing_hcd->result = false;
+			break;
+		}
+	}
+
+	UNLOCK_BUFFER(testing_hcd->resp);
+
+	testing_trx_output();
+
+	retval = 0;
+
+exit:
+	/* dual-firmware parts need a reset to leave production test mode */
+	if (tcm_hcd->features.dual_firmware) {
+		if (tcm_hcd->reset(tcm_hcd, false, true) < 0) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+					"Failed to do reset\n");
+		}
+	}
+
+	return retval;
+}
+
+/**
+ * testing_report() - Accumulate collected report packets
+ *
+ * Called from the syncbox path for each report of the type being
+ * collected.  The collection buffer is sized on the first report and
+ * each subsequent report is appended at a fixed stride.  Once
+ * num_of_reports reports have been stored, report_complete is
+ * signalled.
+ */
+static void testing_report(void)
+{
+	int retval;
+	unsigned int offset;
+	unsigned int report_size;
+	struct syna_tcm_hcd *tcm_hcd = testing_hcd->tcm_hcd;
+
+	report_size = tcm_hcd->report.buffer.data_length;
+
+	LOCK_BUFFER(testing_hcd->report);
+
+	if (testing_hcd->report_index == 0) {
+		/*
+		 * NOTE(review): the buffer is sized assuming all later
+		 * reports have the same size as the first one -- a larger
+		 * later report is only caught by the secure_memcpy()
+		 * destination bound below; confirm this is acceptable.
+		 */
+		retval = syna_tcm_alloc_mem(tcm_hcd,
+				&testing_hcd->report,
+				report_size * testing_hcd->num_of_reports);
+		if (retval < 0) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to allocate memory for report.buf\n");
+			UNLOCK_BUFFER(testing_hcd->report);
+			return;
+		}
+	}
+
+	if (testing_hcd->report_index < testing_hcd->num_of_reports) {
+		offset = report_size * testing_hcd->report_index;
+
+		retval = secure_memcpy(testing_hcd->report.buf + offset,
+				testing_hcd->report.buf_size - offset,
+				tcm_hcd->report.buffer.buf,
+				tcm_hcd->report.buffer.buf_size,
+				tcm_hcd->report.buffer.data_length);
+		if (retval < 0) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+					"Failed to copy report data\n");
+			UNLOCK_BUFFER(testing_hcd->report);
+			return;
+		}
+
+		testing_hcd->report_index++;
+		testing_hcd->report.data_length += report_size;
+	}
+
+	UNLOCK_BUFFER(testing_hcd->report);
+
+	/* wake up the collector once the requested count is reached */
+	if (testing_hcd->report_index == testing_hcd->num_of_reports)
+		complete(&report_complete);
+}
+
+/**
+ * testing_init() - Set up the testing module
+ * @tcm_hcd: core device handle
+ *
+ * Allocate and initialize testing_hcd, then create the sysfs directory
+ * and its attribute and bin files.  On failure everything created so
+ * far is torn down again and testing_hcd is reset to NULL.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+static int testing_init(struct syna_tcm_hcd *tcm_hcd)
+{
+	int retval;
+	int idx;
+
+	testing_hcd = kzalloc(sizeof(*testing_hcd), GFP_KERNEL);
+	if (!testing_hcd) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to allocate memory for testing_hcd\n");
+		return -ENOMEM;
+	}
+
+	testing_hcd->tcm_hcd = tcm_hcd;
+
+	testing_hcd->collect_reports = testing_collect_reports;
+
+	INIT_BUFFER(testing_hcd->out, false);
+	INIT_BUFFER(testing_hcd->resp, false);
+	INIT_BUFFER(testing_hcd->report, false);
+	INIT_BUFFER(testing_hcd->process, false);
+	INIT_BUFFER(testing_hcd->output, false);
+
+	testing_hcd->sysfs_dir = kobject_create_and_add(SYSFS_DIR_NAME,
+			tcm_hcd->sysfs_dir);
+	if (!testing_hcd->sysfs_dir) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to create sysfs directory\n");
+		retval = -EINVAL;
+		goto err_sysfs_create_dir;
+	}
+
+	for (idx = 0; idx < ARRAY_SIZE(attrs); idx++) {
+		retval = sysfs_create_file(testing_hcd->sysfs_dir,
+				&(*attrs[idx]).attr);
+		if (retval < 0) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+					"Failed to create sysfs file\n");
+			goto err_sysfs_create_file;
+		}
+	}
+
+	retval = sysfs_create_bin_file(testing_hcd->sysfs_dir, &bin_attr);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to create sysfs bin file\n");
+		goto err_sysfs_create_bin_file;
+	}
+
+	return 0;
+
+err_sysfs_create_bin_file:
+err_sysfs_create_file:
+	/*
+	 * idx is one past the last successfully created attribute, so
+	 * this removes exactly the files created above (all of them if
+	 * the bin file creation was what failed)
+	 */
+	for (idx--; idx >= 0; idx--)
+		sysfs_remove_file(testing_hcd->sysfs_dir, &(*attrs[idx]).attr);
+
+	kobject_put(testing_hcd->sysfs_dir);
+
+err_sysfs_create_dir:
+	RELEASE_BUFFER(testing_hcd->output);
+	RELEASE_BUFFER(testing_hcd->process);
+	RELEASE_BUFFER(testing_hcd->report);
+	RELEASE_BUFFER(testing_hcd->resp);
+	RELEASE_BUFFER(testing_hcd->out);
+
+	kfree(testing_hcd);
+	testing_hcd = NULL;
+
+	return retval;
+}
+
+/**
+ * testing_remove() - Tear down the testing module
+ * @tcm_hcd: core device handle
+ *
+ * Remove the sysfs entries, release all buffers and free testing_hcd.
+ * testing_remove_complete is always signalled, even when the module
+ * was never initialized.
+ *
+ * Return: always 0.
+ */
+static int testing_remove(struct syna_tcm_hcd *tcm_hcd)
+{
+	int idx;
+
+	if (!testing_hcd)
+		goto exit;
+
+	sysfs_remove_bin_file(testing_hcd->sysfs_dir, &bin_attr);
+
+	for (idx = 0; idx < ARRAY_SIZE(attrs); idx++)
+		sysfs_remove_file(testing_hcd->sysfs_dir, &(*attrs[idx]).attr);
+
+	kobject_put(testing_hcd->sysfs_dir);
+
+	RELEASE_BUFFER(testing_hcd->output);
+	RELEASE_BUFFER(testing_hcd->process);
+	RELEASE_BUFFER(testing_hcd->report);
+	RELEASE_BUFFER(testing_hcd->resp);
+	RELEASE_BUFFER(testing_hcd->out);
+
+	kfree(testing_hcd);
+	testing_hcd = NULL;
+
+exit:
+	complete(&testing_remove_complete);
+
+	return 0;
+}
+
+/*
+ * Module reset callback: (re-)initialize the testing module if it has
+ * not been set up yet; otherwise there is nothing to do.
+ */
+static int testing_reset(struct syna_tcm_hcd *tcm_hcd)
+{
+	if (testing_hcd)
+		return 0;
+
+	return testing_init(tcm_hcd);
+}
+
+/*
+ * Module syncbox callback: forward report packets of the type under
+ * collection to testing_report().
+ */
+static int testing_syncbox(struct syna_tcm_hcd *tcm_hcd)
+{
+	if (testing_hcd &&
+			tcm_hcd->report.id == testing_hcd->report_type)
+		testing_report();
+
+	return 0;
+}
+
+/* callbacks registered with the TCM core for the testing module */
+static struct syna_tcm_module_cb testing_module = {
+	.type = TCM_TESTING,
+	.init = testing_init,
+	.remove = testing_remove,
+	.syncbox = testing_syncbox,
+	.asyncbox = NULL,
+	.reset = testing_reset,
+	.suspend = NULL,
+	.resume = NULL,
+	.early_suspend = NULL,
+};
+
+/* register the testing module with the TCM core */
+static int __init testing_module_init(void)
+{
+	return syna_tcm_add_module(&testing_module, true);
+}
+
+/* unregister the testing module and wait until removal has finished */
+static void __exit testing_module_exit(void)
+{
+	syna_tcm_add_module(&testing_module, false);
+
+	wait_for_completion(&testing_remove_complete);
+}
+
+module_init(testing_module_init);
+module_exit(testing_module_exit);
+
+MODULE_AUTHOR("Synaptics, Inc.");
+MODULE_DESCRIPTION("Synaptics TCM Testing Module");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/touchscreen/synaptics_tcm/synaptics_tcm_testing.h b/drivers/input/touchscreen/synaptics_tcm/synaptics_tcm_testing.h
new file mode 100644
index 0000000..c5a39a5
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_tcm/synaptics_tcm_testing.h
@@ -0,0 +1,85 @@
+/*
+ * Synaptics TCM touchscreen driver
+ *
+ * Copyright (C) 2017-2019 Synaptics Incorporated. All rights reserved.
+ *
+ * Copyright (C) 2017-2019 Scott Lin <scott.lin@tw.synaptics.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * INFORMATION CONTAINED IN THIS DOCUMENT IS PROVIDED "AS-IS," AND SYNAPTICS
+ * EXPRESSLY DISCLAIMS ALL EXPRESS AND IMPLIED WARRANTIES, INCLUDING ANY
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE,
+ * AND ANY WARRANTIES OF NON-INFRINGEMENT OF ANY INTELLECTUAL PROPERTY RIGHTS.
+ * IN NO EVENT SHALL SYNAPTICS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, PUNITIVE, OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OF THE INFORMATION CONTAINED IN THIS DOCUMENT, HOWEVER CAUSED
+ * AND BASED ON ANY THEORY OF LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHER TORTIOUS ACTION, AND EVEN IF SYNAPTICS WAS ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE. IF A TRIBUNAL OF COMPETENT JURISDICTION DOES
+ * NOT PERMIT THE DISCLAIMER OF DIRECT DAMAGES OR ANY OTHER DAMAGES, SYNAPTICS'
+ * TOTAL CUMULATIVE LIABILITY TO ANY PARTY SHALL NOT EXCEED ONE HUNDRED U.S.
+ * DOLLARS.
+ */
+
+#ifndef _SYNAPTICS_TCM_TESTING_H_
+#define _SYNAPTICS_TCM_TESTING_H_
+
+/*
+ * Production test limit tables.
+ *
+ * The image-type tables are sized for a sensor of at most 32 x 32
+ * channels; the test routines check the runtime row/column counts
+ * against ARRAY_SIZE() of these tables before indexing them.
+ *
+ * NOTE(review): the limits below are zero-filled placeholders --
+ * confirm they are populated with product-specific values from the
+ * test specification before use.
+ */
+static const unsigned short drt_hi_limits[32][32] = {
+	{0},
+};
+
+static const unsigned short drt_lo_limits[32][32] = {
+	{0,},
+};
+
+static const unsigned short noise_limits[32][32] = {
+	{0,},
+};
+
+static const short pt11_hi_limits[32][32] = {
+	{0,},
+};
+
+static const short pt11_lo_limits[32][32] = {
+	{0,},
+};
+
+static const short pt12_limits[32][32] = {
+	{0,},
+};
+
+static const short pt13_limits[32][32] = {
+	{0,},
+};
+
+/* expected lockdown (OTP) contents, compared byte for byte */
+static const unsigned char lockdown_limits[] = {
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+};
+
+/* expected boot_info.last_reset_reason for the reset-open test */
+static const unsigned char reset_open_limit = 0x13;
+
+#endif
diff --git a/drivers/input/touchscreen/synaptics_tcm/synaptics_tcm_touch.c b/drivers/input/touchscreen/synaptics_tcm/synaptics_tcm_touch.c
new file mode 100644
index 0000000..fbc3b28
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_tcm/synaptics_tcm_touch.c
@@ -0,0 +1,1272 @@
+/*
+ * Synaptics TCM touchscreen driver
+ *
+ * Copyright (C) 2017-2019 Synaptics Incorporated. All rights reserved.
+ *
+ * Copyright (C) 2017-2019 Scott Lin <scott.lin@tw.synaptics.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * INFORMATION CONTAINED IN THIS DOCUMENT IS PROVIDED "AS-IS," AND SYNAPTICS
+ * EXPRESSLY DISCLAIMS ALL EXPRESS AND IMPLIED WARRANTIES, INCLUDING ANY
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE,
+ * AND ANY WARRANTIES OF NON-INFRINGEMENT OF ANY INTELLECTUAL PROPERTY RIGHTS.
+ * IN NO EVENT SHALL SYNAPTICS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, PUNITIVE, OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OF THE INFORMATION CONTAINED IN THIS DOCUMENT, HOWEVER CAUSED
+ * AND BASED ON ANY THEORY OF LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHER TORTIOUS ACTION, AND EVEN IF SYNAPTICS WAS ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE. IF A TRIBUNAL OF COMPETENT JURISDICTION DOES
+ * NOT PERMIT THE DISCLAIMER OF DIRECT DAMAGES OR ANY OTHER DAMAGES, SYNAPTICS'
+ * TOTAL CUMULATIVE LIABILITY TO ANY PARTY SHALL NOT EXCEED ONE HUNDRED U.S.
+ * DOLLARS.
+ */
+
+#include <linux/input/mt.h>
+#include <linux/interrupt.h>
+#include "synaptics_tcm_core.h"
+
+#define TYPE_B_PROTOCOL
+
+#define USE_DEFAULT_TOUCH_REPORT_CONFIG
+
+#define TOUCH_REPORT_CONFIG_SIZE 128
+
+/* object classification values reported by the device */
+enum touch_status {
+	LIFT = 0,
+	FINGER = 1,
+	GLOVED_FINGER = 2,
+	NOP = -1,
+};
+
+/* field codes used in the touch report configuration */
+enum touch_report_code {
+	TOUCH_END = 0,
+	TOUCH_FOREACH_ACTIVE_OBJECT,
+	TOUCH_FOREACH_OBJECT,
+	TOUCH_FOREACH_END,
+	TOUCH_PAD_TO_NEXT_BYTE,
+	TOUCH_TIMESTAMP,
+	TOUCH_OBJECT_N_INDEX,
+	TOUCH_OBJECT_N_CLASSIFICATION,
+	TOUCH_OBJECT_N_X_POSITION,
+	TOUCH_OBJECT_N_Y_POSITION,
+	TOUCH_OBJECT_N_Z,
+	TOUCH_OBJECT_N_X_WIDTH,
+	TOUCH_OBJECT_N_Y_WIDTH,
+	TOUCH_OBJECT_N_TX_POSITION_TIXELS,
+	TOUCH_OBJECT_N_RX_POSITION_TIXELS,
+	TOUCH_0D_BUTTONS_STATE,
+	TOUCH_GESTURE_DOUBLE_TAP,
+	TOUCH_FRAME_RATE,
+	TOUCH_POWER_IM,
+	TOUCH_CID_IM,
+	TOUCH_RAIL_IM,
+	TOUCH_CID_VARIANCE_IM,
+	TOUCH_NSM_FREQUENCY,
+	TOUCH_NSM_STATE,
+	TOUCH_NUM_OF_ACTIVE_OBJECTS,
+	TOUCH_NUM_OF_CPU_CYCLES_USED_SINCE_LAST_FRAME,
+	TOUCH_TUNING_GAUSSIAN_WIDTHS = 0x80,
+	TOUCH_TUNING_SMALL_OBJECT_PARAMS,
+	TOUCH_TUNING_0D_BUTTONS_VARIANCE,
+};
+
+/* per-object data decoded from a touch report */
+struct object_data {
+	unsigned char status;
+	unsigned int x_pos;
+	unsigned int y_pos;
+	unsigned int x_width;
+	unsigned int y_width;
+	unsigned int z;
+	unsigned int tx_pos;
+	unsigned int rx_pos;
+};
+
+/* parameters the input device was last registered with */
+struct input_params {
+	unsigned int max_x;
+	unsigned int max_y;
+	unsigned int max_objects;
+};
+
+/* all fields decoded from a single touch report */
+struct touch_data {
+	struct object_data *object_data;
+	unsigned int timestamp;
+	unsigned int buttons_state;
+	unsigned int gesture_double_tap;
+	unsigned int frame_rate;
+	unsigned int power_im;
+	unsigned int cid_im;
+	unsigned int rail_im;
+	unsigned int cid_variance_im;
+	unsigned int nsm_frequency;
+	unsigned int nsm_state;
+	unsigned int num_of_active_objects;
+	unsigned int num_of_cpu_cycles;
+};
+
+/* state of the touch reporting module */
+struct touch_hcd {
+	bool irq_wake;
+	bool report_touch;
+	bool suspend_touch;
+	unsigned char *prev_status;
+	unsigned int max_x;
+	unsigned int max_y;
+	unsigned int max_objects;
+	struct mutex report_mutex;
+	struct input_dev *input_dev;
+	struct touch_data touch_data;
+	struct input_params input_params;
+	struct syna_tcm_buffer out;
+	struct syna_tcm_buffer resp;
+	struct syna_tcm_hcd *tcm_hcd;
+};
+
+DECLARE_COMPLETION(touch_remove_complete);
+
+static struct touch_hcd *touch_hcd;
+
+/**
+ * touch_free_objects() - Free all touch objects
+ *
+ * Report finger lift events to the input subsystem for all touch
+ * objects, so that no stale contacts remain after a reset or suspend.
+ */
+static void touch_free_objects(void)
+{
+#ifdef TYPE_B_PROTOCOL
+	unsigned int idx;
+#endif
+
+	if (touch_hcd->input_dev == NULL)
+		return;
+
+	mutex_lock(&touch_hcd->report_mutex);
+
+#ifdef TYPE_B_PROTOCOL
+	/* release every multi-touch slot individually */
+	for (idx = 0; idx < touch_hcd->max_objects; idx++) {
+		input_mt_slot(touch_hcd->input_dev, idx);
+		input_mt_report_slot_state(touch_hcd->input_dev,
+				MT_TOOL_FINGER, 0);
+	}
+#endif
+	input_report_key(touch_hcd->input_dev,
+			BTN_TOUCH, 0);
+	input_report_key(touch_hcd->input_dev,
+			BTN_TOOL_FINGER, 0);
+#ifndef TYPE_B_PROTOCOL
+	input_mt_sync(touch_hcd->input_dev);
+#endif
+	input_sync(touch_hcd->input_dev);
+
+	mutex_unlock(&touch_hcd->report_mutex);
+}
+
+/**
+ * touch_get_report_data() - Retrieve data from touch report
+ * @offset: bit offset of the field within the report
+ * @bits: field width in bits (1..32)
+ * @data: output; receives the extracted value
+ *
+ * Retrieve data from the touch report based on the bit offset and bit
+ * length information from the touch report configuration.  A field
+ * lying (partly) beyond the end of the report yields *data = 0 and
+ * success rather than an error.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+static int touch_get_report_data(unsigned int offset,
+		unsigned int bits, unsigned int *data)
+{
+	unsigned char mask;
+	unsigned char byte_data;
+	unsigned int output_data;
+	unsigned int bit_offset;
+	unsigned int byte_offset;
+	unsigned int data_bits;
+	unsigned int available_bits;
+	unsigned int remaining_bits;
+	unsigned char *touch_report;
+	struct syna_tcm_hcd *tcm_hcd = touch_hcd->tcm_hcd;
+
+	if (bits == 0 || bits > 32) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Invalid number of bits\n");
+		return -EINVAL;
+	}
+
+	/* fields beyond the end of the report read as zero */
+	if (offset + bits > tcm_hcd->report.buffer.data_length * 8) {
+		*data = 0;
+		return 0;
+	}
+
+	touch_report = tcm_hcd->report.buffer.buf;
+
+	output_data = 0;
+	remaining_bits = bits;
+
+	bit_offset = offset % 8;
+	byte_offset = offset / 8;
+
+	/* assemble the value least-significant chunk first, one byte
+	 * of the report at a time
+	 */
+	while (remaining_bits) {
+		byte_data = touch_report[byte_offset];
+		byte_data >>= bit_offset;
+
+		available_bits = 8 - bit_offset;
+		data_bits = MIN(available_bits, remaining_bits);
+		mask = 0xff >> (8 - data_bits);
+
+		byte_data &= mask;
+
+		output_data |= byte_data << (bits - remaining_bits);
+
+		bit_offset = 0;
+		byte_offset += 1;
+		remaining_bits -= data_bits;
+	}
+
+	*data = output_data;
+
+	return 0;
+}
+
+/**
+ * touch_parse_report() - Parse touch report
+ *
+ * Traverse through the touch report configuration and parse the touch
+ * report generated by the device accordingly to retrieve the touch
+ * data into touch_hcd->touch_data.
+ *
+ * NOTE(review): each field code is assumed to be followed by a length
+ * byte inside the configuration, so config_data[idx++] could read one
+ * byte past config_size for a malformed configuration; likewise obj
+ * and active_objects are only set by the corresponding codes before
+ * being used -- confirm the configuration is validated when read from
+ * the device.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+static int touch_parse_report(void)
+{
+	int retval;
+	bool active_only;
+	bool num_of_active_objects;
+	unsigned char code;
+	unsigned int size;
+	unsigned int idx;
+	unsigned int obj;
+	unsigned int next;
+	unsigned int data;
+	unsigned int bits;
+	unsigned int offset;
+	unsigned int objects;
+	unsigned int active_objects;
+	unsigned int report_size;
+	unsigned int config_size;
+	unsigned char *config_data;
+	struct touch_data *touch_data;
+	struct object_data *object_data;
+	struct syna_tcm_hcd *tcm_hcd = touch_hcd->tcm_hcd;
+	/* remembered across calls so an early TOUCH_NUM_OF_ACTIVE_OBJECTS
+	 * of zero can skip to the end of the foreach section
+	 */
+	static unsigned int end_of_foreach;
+
+	touch_data = &touch_hcd->touch_data;
+	object_data = touch_hcd->touch_data.object_data;
+
+	config_data = tcm_hcd->config.buf;
+	config_size = tcm_hcd->config.data_length;
+
+	report_size = tcm_hcd->report.buffer.data_length;
+
+	/* start from a clean slate for all objects */
+	size = sizeof(*object_data) * touch_hcd->max_objects;
+	memset(touch_hcd->touch_data.object_data, 0x00, size);
+
+	num_of_active_objects = false;
+
+	/* idx walks the configuration, offset the report (in bits) */
+	idx = 0;
+	offset = 0;
+	objects = 0;
+	while (idx < config_size) {
+		code = config_data[idx++];
+		switch (code) {
+		case TOUCH_END:
+			goto exit;
+		case TOUCH_FOREACH_ACTIVE_OBJECT:
+			obj = 0;
+			next = idx;
+			active_only = true;
+			break;
+		case TOUCH_FOREACH_OBJECT:
+			obj = 0;
+			next = idx;
+			active_only = false;
+			break;
+		case TOUCH_FOREACH_END:
+			end_of_foreach = idx;
+			if (active_only) {
+				if (num_of_active_objects) {
+					objects++;
+					if (objects < active_objects)
+						idx = next;
+				} else if (offset < report_size * 8) {
+					/* no object count field: loop
+					 * until the report is consumed
+					 */
+					idx = next;
+				}
+			} else {
+				obj++;
+				if (obj < touch_hcd->max_objects)
+					idx = next;
+			}
+			break;
+		case TOUCH_PAD_TO_NEXT_BYTE:
+			offset = ceil_div(offset, 8) * 8;
+			break;
+		case TOUCH_TIMESTAMP:
+			bits = config_data[idx++];
+			retval = touch_get_report_data(offset, bits, &data);
+			if (retval < 0) {
+				LOGE(tcm_hcd->pdev->dev.parent,
+						"Failed to get timestamp\n");
+				return retval;
+			}
+			touch_data->timestamp = data;
+			offset += bits;
+			break;
+		case TOUCH_OBJECT_N_INDEX:
+			bits = config_data[idx++];
+			retval = touch_get_report_data(offset, bits, &obj);
+			if (retval < 0) {
+				LOGE(tcm_hcd->pdev->dev.parent,
+						"Failed to get object index\n");
+				return retval;
+			}
+			offset += bits;
+			break;
+		case TOUCH_OBJECT_N_CLASSIFICATION:
+			bits = config_data[idx++];
+			retval = touch_get_report_data(offset, bits, &data);
+			if (retval < 0) {
+				LOGE(tcm_hcd->pdev->dev.parent,
+					"Failed to get classification data\n");
+				return retval;
+			}
+			object_data[obj].status = data;
+			offset += bits;
+			break;
+		case TOUCH_OBJECT_N_X_POSITION:
+			bits = config_data[idx++];
+			retval = touch_get_report_data(offset, bits, &data);
+			if (retval < 0) {
+				LOGE(tcm_hcd->pdev->dev.parent,
+					"Failed to get object x position\n");
+				return retval;
+			}
+			object_data[obj].x_pos = data;
+			offset += bits;
+			break;
+		case TOUCH_OBJECT_N_Y_POSITION:
+			bits = config_data[idx++];
+			retval = touch_get_report_data(offset, bits, &data);
+			if (retval < 0) {
+				LOGE(tcm_hcd->pdev->dev.parent,
+					"Failed to get object y position\n");
+				return retval;
+			}
+			object_data[obj].y_pos = data;
+			offset += bits;
+			break;
+		case TOUCH_OBJECT_N_Z:
+			bits = config_data[idx++];
+			retval = touch_get_report_data(offset, bits, &data);
+			if (retval < 0) {
+				LOGE(tcm_hcd->pdev->dev.parent,
+						"Failed to get object z\n");
+				return retval;
+			}
+			object_data[obj].z = data;
+			offset += bits;
+			break;
+		case TOUCH_OBJECT_N_X_WIDTH:
+			bits = config_data[idx++];
+			retval = touch_get_report_data(offset, bits, &data);
+			if (retval < 0) {
+				LOGE(tcm_hcd->pdev->dev.parent,
+					"Failed to get object x width\n");
+				return retval;
+			}
+			object_data[obj].x_width = data;
+			offset += bits;
+			break;
+		case TOUCH_OBJECT_N_Y_WIDTH:
+			bits = config_data[idx++];
+			retval = touch_get_report_data(offset, bits, &data);
+			if (retval < 0) {
+				LOGE(tcm_hcd->pdev->dev.parent,
+					"Failed to get object y width\n");
+				return retval;
+			}
+			object_data[obj].y_width = data;
+			offset += bits;
+			break;
+		case TOUCH_OBJECT_N_TX_POSITION_TIXELS:
+			bits = config_data[idx++];
+			retval = touch_get_report_data(offset, bits, &data);
+			if (retval < 0) {
+				LOGE(tcm_hcd->pdev->dev.parent,
+					"Failed to get object tx position\n");
+				return retval;
+			}
+			object_data[obj].tx_pos = data;
+			offset += bits;
+			break;
+		case TOUCH_OBJECT_N_RX_POSITION_TIXELS:
+			bits = config_data[idx++];
+			retval = touch_get_report_data(offset, bits, &data);
+			if (retval < 0) {
+				LOGE(tcm_hcd->pdev->dev.parent,
+					"Failed to get object rx position\n");
+				return retval;
+			}
+			object_data[obj].rx_pos = data;
+			offset += bits;
+			break;
+		case TOUCH_0D_BUTTONS_STATE:
+			bits = config_data[idx++];
+			retval = touch_get_report_data(offset, bits, &data);
+			if (retval < 0) {
+				LOGE(tcm_hcd->pdev->dev.parent,
+					"Failed to get 0D buttons state\n");
+				return retval;
+			}
+			touch_data->buttons_state = data;
+			offset += bits;
+			break;
+		case TOUCH_GESTURE_DOUBLE_TAP:
+			bits = config_data[idx++];
+			retval = touch_get_report_data(offset, bits, &data);
+			if (retval < 0) {
+				LOGE(tcm_hcd->pdev->dev.parent,
+					"Failed to get gesture double tap\n");
+				return retval;
+			}
+			touch_data->gesture_double_tap = data;
+			offset += bits;
+			break;
+		case TOUCH_FRAME_RATE:
+			bits = config_data[idx++];
+			retval = touch_get_report_data(offset, bits, &data);
+			if (retval < 0) {
+				LOGE(tcm_hcd->pdev->dev.parent,
+						"Failed to get frame rate\n");
+				return retval;
+			}
+			touch_data->frame_rate = data;
+			offset += bits;
+			break;
+		case TOUCH_POWER_IM:
+			bits = config_data[idx++];
+			retval = touch_get_report_data(offset, bits, &data);
+			if (retval < 0) {
+				LOGE(tcm_hcd->pdev->dev.parent,
+						"Failed to get power IM\n");
+				return retval;
+			}
+			touch_data->power_im = data;
+			offset += bits;
+			break;
+		case TOUCH_CID_IM:
+			bits = config_data[idx++];
+			retval = touch_get_report_data(offset, bits, &data);
+			if (retval < 0) {
+				LOGE(tcm_hcd->pdev->dev.parent,
+						"Failed to get CID IM\n");
+				return retval;
+			}
+			touch_data->cid_im = data;
+			offset += bits;
+			break;
+		case TOUCH_RAIL_IM:
+			bits = config_data[idx++];
+			retval = touch_get_report_data(offset, bits, &data);
+			if (retval < 0) {
+				LOGE(tcm_hcd->pdev->dev.parent,
+						"Failed to get rail IM\n");
+				return retval;
+			}
+			touch_data->rail_im = data;
+			offset += bits;
+			break;
+		case TOUCH_CID_VARIANCE_IM:
+			bits = config_data[idx++];
+			retval = touch_get_report_data(offset, bits, &data);
+			if (retval < 0) {
+				LOGE(tcm_hcd->pdev->dev.parent,
+					"Failed to get CID variance IM\n");
+				return retval;
+			}
+			touch_data->cid_variance_im = data;
+			offset += bits;
+			break;
+		case TOUCH_NSM_FREQUENCY:
+			bits = config_data[idx++];
+			retval = touch_get_report_data(offset, bits, &data);
+			if (retval < 0) {
+				LOGE(tcm_hcd->pdev->dev.parent,
+					"Failed to get NSM frequency\n");
+				return retval;
+			}
+			touch_data->nsm_frequency = data;
+			offset += bits;
+			break;
+		case TOUCH_NSM_STATE:
+			bits = config_data[idx++];
+			retval = touch_get_report_data(offset, bits, &data);
+			if (retval < 0) {
+				LOGE(tcm_hcd->pdev->dev.parent,
+						"Failed to get NSM state\n");
+				return retval;
+			}
+			touch_data->nsm_state = data;
+			offset += bits;
+			break;
+		case TOUCH_NUM_OF_ACTIVE_OBJECTS:
+			bits = config_data[idx++];
+			retval = touch_get_report_data(offset, bits, &data);
+			if (retval < 0) {
+				LOGE(tcm_hcd->pdev->dev.parent,
+					"Failed to get number of objects\n");
+				return retval;
+			}
+			active_objects = data;
+			num_of_active_objects = true;
+			touch_data->num_of_active_objects = data;
+			offset += bits;
+			/* nothing to iterate: skip the foreach body */
+			if (touch_data->num_of_active_objects == 0)
+				idx = end_of_foreach;
+			break;
+		case TOUCH_NUM_OF_CPU_CYCLES_USED_SINCE_LAST_FRAME:
+			bits = config_data[idx++];
+			retval = touch_get_report_data(offset, bits, &data);
+			if (retval < 0) {
+				LOGE(tcm_hcd->pdev->dev.parent,
+					"Failed to get number of CPU cycles\n");
+				return retval;
+			}
+			touch_data->num_of_cpu_cycles = data;
+			offset += bits;
+			break;
+		case TOUCH_TUNING_GAUSSIAN_WIDTHS:
+			/* tuning fields are skipped, not decoded */
+			bits = config_data[idx++];
+			offset += bits;
+			break;
+		case TOUCH_TUNING_SMALL_OBJECT_PARAMS:
+			bits = config_data[idx++];
+			offset += bits;
+			break;
+		case TOUCH_TUNING_0D_BUTTONS_VARIANCE:
+			bits = config_data[idx++];
+			offset += bits;
+			break;
+		}
+	}
+
+exit:
+	return 0;
+}
+
+/**
+ * touch_report() - Report touch events
+ *
+ * Retrieve data from the touch report generated by the device and report touch
+ * events to the input subsystem.
+ */
+static void touch_report(void)
+{
+	int retval;
+	unsigned int idx;
+	unsigned int x;
+	unsigned int y;
+	unsigned int temp;
+	unsigned int status;
+	unsigned int touch_count;
+	struct touch_data *touch_data;
+	struct object_data *object_data;
+	struct syna_tcm_hcd *tcm_hcd = touch_hcd->tcm_hcd;
+	const struct syna_tcm_board_data *bdata = tcm_hcd->hw_if->bdata;
+
+	/* Reporting is disabled while the report config is being rebuilt */
+	if (!touch_hcd->report_touch)
+		return;
+
+	if (touch_hcd->input_dev == NULL)
+		return;
+
+	mutex_lock(&touch_hcd->report_mutex);
+
+	retval = touch_parse_report();
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to parse touch report\n");
+		goto exit;
+	}
+
+	touch_data = &touch_hcd->touch_data;
+	object_data = touch_hcd->touch_data.object_data;
+
+#ifdef WAKEUP_GESTURE
+	/* Emit a wakeup key press/release pair for a double tap in suspend */
+	if (touch_data->gesture_double_tap && tcm_hcd->in_suspend) {
+		input_report_key(touch_hcd->input_dev, KEY_WAKEUP, 1);
+		input_sync(touch_hcd->input_dev);
+		input_report_key(touch_hcd->input_dev, KEY_WAKEUP, 0);
+		input_sync(touch_hcd->input_dev);
+	}
+#endif
+
+	/* No finger reporting while suspended; only gestures are handled */
+	if (tcm_hcd->in_suspend)
+		goto exit;
+
+	touch_count = 0;
+
+	for (idx = 0; idx < touch_hcd->max_objects; idx++) {
+		/* Suppress repeated LIFT events for an already-lifted slot */
+		if (touch_hcd->prev_status[idx] == LIFT &&
+				object_data[idx].status == LIFT)
+			status = NOP;
+		else
+			status = object_data[idx].status;
+
+		switch (status) {
+		case LIFT:
+#ifdef TYPE_B_PROTOCOL
+			input_mt_slot(touch_hcd->input_dev, idx);
+			input_mt_report_slot_state(touch_hcd->input_dev,
+					MT_TOOL_FINGER, 0);
+#endif
+			break;
+		case FINGER:
+		case GLOVED_FINGER:
+			x = object_data[idx].x_pos;
+			y = object_data[idx].y_pos;
+			/* Apply board-specific axis swap/flip transforms */
+			if (bdata->swap_axes) {
+				temp = x;
+				x = y;
+				y = temp;
+			}
+			if (bdata->x_flip)
+				x = touch_hcd->input_params.max_x - x;
+			if (bdata->y_flip)
+				y = touch_hcd->input_params.max_y - y;
+#ifdef TYPE_B_PROTOCOL
+			input_mt_slot(touch_hcd->input_dev, idx);
+			input_mt_report_slot_state(touch_hcd->input_dev,
+					MT_TOOL_FINGER, 1);
+#endif
+			input_report_key(touch_hcd->input_dev,
+					BTN_TOUCH, 1);
+			input_report_key(touch_hcd->input_dev,
+					BTN_TOOL_FINGER, 1);
+			input_report_abs(touch_hcd->input_dev,
+					ABS_MT_POSITION_X, x);
+			input_report_abs(touch_hcd->input_dev,
+					ABS_MT_POSITION_Y, y);
+#ifndef TYPE_B_PROTOCOL
+			input_mt_sync(touch_hcd->input_dev);
+#endif
+			LOGD(tcm_hcd->pdev->dev.parent,
+					"Finger %d: x = %d, y = %d\n",
+					idx, x, y);
+			touch_count++;
+			break;
+		default:
+			break;
+		}
+
+		touch_hcd->prev_status[idx] = object_data[idx].status;
+	}
+
+	/* All fingers up: release BTN_TOUCH/BTN_TOOL_FINGER */
+	if (touch_count == 0) {
+		input_report_key(touch_hcd->input_dev,
+				BTN_TOUCH, 0);
+		input_report_key(touch_hcd->input_dev,
+				BTN_TOOL_FINGER, 0);
+#ifndef TYPE_B_PROTOCOL
+		input_mt_sync(touch_hcd->input_dev);
+#endif
+	}
+
+	input_sync(touch_hcd->input_dev);
+
+exit:
+	mutex_unlock(&touch_hcd->report_mutex);
+}
+
+/**
+ * touch_set_input_params() - Set input parameters
+ *
+ * Set the input parameters of the input device based on the information
+ * retrieved from the application information packet. In addition, set up an
+ * array for tracking the status of touch objects.
+ */
+static int touch_set_input_params(void)
+{
+	struct syna_tcm_hcd *tcm_hcd = touch_hcd->tcm_hcd;
+
+	input_set_abs_params(touch_hcd->input_dev,
+			ABS_MT_POSITION_X, 0, touch_hcd->max_x, 0, 0);
+	input_set_abs_params(touch_hcd->input_dev,
+			ABS_MT_POSITION_Y, 0, touch_hcd->max_y, 0, 0);
+
+	input_mt_init_slots(touch_hcd->input_dev, touch_hcd->max_objects,
+			INPUT_MT_DIRECT);
+
+	/* Snapshot the registered parameters for later change detection */
+	touch_hcd->input_params.max_x = touch_hcd->max_x;
+	touch_hcd->input_params.max_y = touch_hcd->max_y;
+	touch_hcd->input_params.max_objects = touch_hcd->max_objects;
+
+	if (touch_hcd->max_objects == 0)
+		return 0;
+
+	/* One status byte per trackable object; kfree(NULL) is a no-op */
+	kfree(touch_hcd->prev_status);
+	touch_hcd->prev_status = kzalloc(touch_hcd->max_objects, GFP_KERNEL);
+	if (!touch_hcd->prev_status) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+			"Failed to allocate memory for prev_status\n");
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+/**
+ * touch_get_input_params() - Get input parameters
+ *
+ * Retrieve the input parameters to register with the input subsystem for
+ * the input device from the application information packet. In addition,
+ * the touch report configuration is retrieved and stored.
+ */
+static int touch_get_input_params(void)
+{
+	int retval;
+	unsigned int temp;
+	struct syna_tcm_app_info *app_info;
+	struct syna_tcm_hcd *tcm_hcd = touch_hcd->tcm_hcd;
+	const struct syna_tcm_board_data *bdata = tcm_hcd->hw_if->bdata;
+
+	/* Maxima come little-endian from the application info packet */
+	app_info = &tcm_hcd->app_info;
+	touch_hcd->max_x = le2_to_uint(app_info->max_x);
+	touch_hcd->max_y = le2_to_uint(app_info->max_y);
+	touch_hcd->max_objects = le2_to_uint(app_info->max_objects);
+
+	/* Swap X/Y maxima to match the board's axis orientation */
+	if (bdata->swap_axes) {
+		temp = touch_hcd->max_x;
+		touch_hcd->max_x = touch_hcd->max_y;
+		touch_hcd->max_y = temp;
+	}
+
+	LOCK_BUFFER(tcm_hcd->config);
+
+	/* Fetch and cache the current touch report config in tcm_hcd->config */
+	retval = tcm_hcd->write_message(tcm_hcd,
+			CMD_GET_TOUCH_REPORT_CONFIG,
+			NULL,
+			0,
+			&tcm_hcd->config.buf,
+			&tcm_hcd->config.buf_size,
+			&tcm_hcd->config.data_length,
+			NULL,
+			0);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to write command %s\n",
+				STR(CMD_GET_TOUCH_REPORT_CONFIG));
+		UNLOCK_BUFFER(tcm_hcd->config);
+		return retval;
+	}
+
+	UNLOCK_BUFFER(tcm_hcd->config);
+
+	return 0;
+}
+
+/**
+ * touch_set_input_dev() - Set up input device
+ *
+ * Allocate an input device, configure the input device based on the particular
+ * input events to be reported, and register the input device with the input
+ * subsystem.
+ */
+static int touch_set_input_dev(void)
+{
+	int retval;
+	struct syna_tcm_hcd *tcm_hcd = touch_hcd->tcm_hcd;
+
+	touch_hcd->input_dev = input_allocate_device();
+	if (touch_hcd->input_dev == NULL) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to allocate input device\n");
+		return -ENODEV;
+	}
+
+	touch_hcd->input_dev->name = TOUCH_INPUT_NAME;
+	touch_hcd->input_dev->phys = TOUCH_INPUT_PHYS_PATH;
+	touch_hcd->input_dev->id.product = SYNAPTICS_TCM_ID_PRODUCT;
+	touch_hcd->input_dev->id.version = SYNAPTICS_TCM_ID_VERSION;
+	touch_hcd->input_dev->dev.parent = tcm_hcd->pdev->dev.parent;
+	input_set_drvdata(touch_hcd->input_dev, tcm_hcd);
+
+	/* Advertise the event types/codes emitted by touch_report() */
+	set_bit(EV_SYN, touch_hcd->input_dev->evbit);
+	set_bit(EV_KEY, touch_hcd->input_dev->evbit);
+	set_bit(EV_ABS, touch_hcd->input_dev->evbit);
+	set_bit(BTN_TOUCH, touch_hcd->input_dev->keybit);
+	set_bit(BTN_TOOL_FINGER, touch_hcd->input_dev->keybit);
+#ifdef INPUT_PROP_DIRECT
+	set_bit(INPUT_PROP_DIRECT, touch_hcd->input_dev->propbit);
+#endif
+
+#ifdef WAKEUP_GESTURE
+	set_bit(KEY_WAKEUP, touch_hcd->input_dev->keybit);
+	input_set_capability(touch_hcd->input_dev, EV_KEY, KEY_WAKEUP);
+#endif
+
+	/* On any failure below, free the device and clear the pointer so
+	 * callers can retry cleanly after the next device reset.
+	 */
+	retval = touch_set_input_params();
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to set input parameters\n");
+		input_free_device(touch_hcd->input_dev);
+		touch_hcd->input_dev = NULL;
+		return retval;
+	}
+
+	retval = input_register_device(touch_hcd->input_dev);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to register input device\n");
+		input_free_device(touch_hcd->input_dev);
+		touch_hcd->input_dev = NULL;
+		return retval;
+	}
+
+	return 0;
+}
+
+/**
+ * touch_set_report_config() - Set touch report configuration
+ *
+ * Send the SET_TOUCH_REPORT_CONFIG command to configure the format and content
+ * of the touch report.
+ */
+static int touch_set_report_config(void)
+{
+	int retval;
+	unsigned int idx;
+	unsigned int length;
+	struct syna_tcm_app_info *app_info;
+	struct syna_tcm_hcd *tcm_hcd = touch_hcd->tcm_hcd;
+	const struct syna_tcm_board_data *bdata = tcm_hcd->hw_if->bdata;
+
+	/* Custom report config only applies when extended reports are used */
+	if (!bdata->extend_report)
+		return 0;
+
+	app_info = &tcm_hcd->app_info;
+	length = le2_to_uint(app_info->max_touch_report_config_size);
+
+	/* The device must accept at least our fixed-size config */
+	if (length < TOUCH_REPORT_CONFIG_SIZE) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Invalid maximum touch report config size\n");
+		return -EINVAL;
+	}
+
+	LOCK_BUFFER(touch_hcd->out);
+
+	retval = syna_tcm_alloc_mem(tcm_hcd,
+			&touch_hcd->out,
+			length);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+			"Failed to allocate memory for touch_hcd->out.buf\n");
+		UNLOCK_BUFFER(touch_hcd->out);
+		return retval;
+	}
+
+	/* Build the report descriptor as (entity code, bit width) pairs */
+	idx = 0;
+#ifdef WAKEUP_GESTURE
+	touch_hcd->out.buf[idx++] = TOUCH_GESTURE_DOUBLE_TAP;
+	touch_hcd->out.buf[idx++] = 8;
+#endif
+	touch_hcd->out.buf[idx++] = TOUCH_FOREACH_ACTIVE_OBJECT;
+	touch_hcd->out.buf[idx++] = TOUCH_OBJECT_N_INDEX;
+	touch_hcd->out.buf[idx++] = 4;
+	touch_hcd->out.buf[idx++] = TOUCH_OBJECT_N_CLASSIFICATION;
+	touch_hcd->out.buf[idx++] = 4;
+	touch_hcd->out.buf[idx++] = TOUCH_OBJECT_N_X_POSITION;
+	touch_hcd->out.buf[idx++] = 12;
+	touch_hcd->out.buf[idx++] = TOUCH_OBJECT_N_Y_POSITION;
+	touch_hcd->out.buf[idx++] = 12;
+	touch_hcd->out.buf[idx++] = TOUCH_FOREACH_END;
+	touch_hcd->out.buf[idx++] = TOUCH_END;
+
+	LOCK_BUFFER(touch_hcd->resp);
+
+	retval = tcm_hcd->write_message(tcm_hcd,
+			CMD_SET_TOUCH_REPORT_CONFIG,
+			touch_hcd->out.buf,
+			length,
+			&touch_hcd->resp.buf,
+			&touch_hcd->resp.buf_size,
+			&touch_hcd->resp.data_length,
+			NULL,
+			0);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to write command %s\n",
+				STR(CMD_SET_TOUCH_REPORT_CONFIG));
+		UNLOCK_BUFFER(touch_hcd->resp);
+		UNLOCK_BUFFER(touch_hcd->out);
+		return retval;
+	}
+
+	UNLOCK_BUFFER(touch_hcd->resp);
+	UNLOCK_BUFFER(touch_hcd->out);
+
+	return 0;
+}
+
+/**
+ * touch_check_input_params() - Check input parameters
+ *
+ * Check if any of the input parameters registered with the input subsystem for
+ * the input device has changed.
+ */
+/* Returns 1 when parameters changed (input device must be re-registered),
+ * 0 when unchanged or not yet known, negative errno on allocation failure.
+ */
+static int touch_check_input_params(void)
+{
+	unsigned int size;
+	struct syna_tcm_hcd *tcm_hcd = touch_hcd->tcm_hcd;
+
+	/* Parameters not reported yet; nothing to compare against */
+	if (touch_hcd->max_x == 0 && touch_hcd->max_y == 0)
+		return 0;
+
+	/* Object count changed: resize the per-object data array */
+	if (touch_hcd->input_params.max_objects != touch_hcd->max_objects) {
+		kfree(touch_hcd->touch_data.object_data);
+		size = sizeof(*touch_hcd->touch_data.object_data);
+		size *= touch_hcd->max_objects;
+		touch_hcd->touch_data.object_data = kzalloc(size, GFP_KERNEL);
+		if (!touch_hcd->touch_data.object_data) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to allocate memory for object_data\n");
+			return -ENOMEM;
+		}
+		return 1;
+	}
+
+	if (touch_hcd->input_params.max_x != touch_hcd->max_x)
+		return 1;
+
+	if (touch_hcd->input_params.max_y != touch_hcd->max_y)
+		return 1;
+
+	return 0;
+}
+
+/**
+ * touch_set_input_reporting() - Configure touch report and set up new input
+ * device if necessary
+ *
+ * After a device reset event, configure the touch report and set up a new input
+ * device if any of the input parameters has changed after the device reset.
+ */
+static int touch_set_input_reporting(void)
+{
+	int retval;
+	struct syna_tcm_hcd *tcm_hcd = touch_hcd->tcm_hcd;
+
+	/* Touch reporting requires running application firmware */
+	if (tcm_hcd->id_info.mode != MODE_APPLICATION ||
+			tcm_hcd->app_status != APP_STATUS_OK) {
+		LOGN(tcm_hcd->pdev->dev.parent,
+				"Application firmware not running\n");
+		return 0;
+	}
+
+	/* Gate touch_report() while the configuration is rebuilt */
+	touch_hcd->report_touch = false;
+
+	touch_free_objects();
+
+	mutex_lock(&touch_hcd->report_mutex);
+
+	retval = touch_set_report_config();
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to set report config\n");
+		goto exit;
+	}
+
+	retval = touch_get_input_params();
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to get input parameters\n");
+		goto exit;
+	}
+
+	retval = touch_check_input_params();
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to check input parameters\n");
+		goto exit;
+	} else if (retval == 0) {
+		/* Existing input device still matches; keep it */
+		LOGD(tcm_hcd->pdev->dev.parent,
+				"Input parameters unchanged\n");
+		goto exit;
+	}
+
+	/* Parameters changed: replace the registered input device */
+	if (touch_hcd->input_dev != NULL) {
+		input_unregister_device(touch_hcd->input_dev);
+		touch_hcd->input_dev = NULL;
+	}
+
+	retval = touch_set_input_dev();
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to set up input device\n");
+		goto exit;
+	}
+
+exit:
+	mutex_unlock(&touch_hcd->report_mutex);
+
+	/* Re-enable reporting only if everything above succeeded */
+	touch_hcd->report_touch = retval < 0 ? false : true;
+
+	return retval;
+}
+
+/* Module init callback: allocate the touch handler context, configure input
+ * reporting, and hook touch_report() into the core dispatch.
+ */
+static int touch_init(struct syna_tcm_hcd *tcm_hcd)
+{
+	int retval;
+
+	touch_hcd = kzalloc(sizeof(*touch_hcd), GFP_KERNEL);
+	if (!touch_hcd) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to allocate memory for touch_hcd\n");
+		return -ENOMEM;
+	}
+
+	touch_hcd->tcm_hcd = tcm_hcd;
+
+	mutex_init(&touch_hcd->report_mutex);
+
+	INIT_BUFFER(touch_hcd->out, false);
+	INIT_BUFFER(touch_hcd->resp, false);
+
+	retval = touch_set_input_reporting();
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to set up input reporting\n");
+		goto err_set_input_reporting;
+	}
+
+	tcm_hcd->report_touch = touch_report;
+
+	return 0;
+
+err_set_input_reporting:
+	/* Undo partial setup; kfree(NULL) is safe for never-allocated members */
+	kfree(touch_hcd->touch_data.object_data);
+	kfree(touch_hcd->prev_status);
+
+	RELEASE_BUFFER(touch_hcd->resp);
+	RELEASE_BUFFER(touch_hcd->out);
+
+	kfree(touch_hcd);
+	touch_hcd = NULL;
+
+	return retval;
+}
+
+/* Module remove callback: unhook from the core, release all resources and
+ * signal completion so module exit can proceed.
+ */
+static int touch_remove(struct syna_tcm_hcd *tcm_hcd)
+{
+	if (!touch_hcd)
+		goto exit;
+
+	tcm_hcd->report_touch = NULL;
+
+	if (touch_hcd->input_dev)
+		input_unregister_device(touch_hcd->input_dev);
+
+	kfree(touch_hcd->touch_data.object_data);
+	kfree(touch_hcd->prev_status);
+
+	RELEASE_BUFFER(touch_hcd->resp);
+	RELEASE_BUFFER(touch_hcd->out);
+
+	kfree(touch_hcd);
+	touch_hcd = NULL;
+
+exit:
+	/* Always signal, even if init never ran, so module exit cannot hang */
+	complete(&touch_remove_complete);
+
+	return 0;
+}
+
+/* Synchronous report dispatch: handle identify (reset) and touch reports
+ * delivered in interrupt context by the core.
+ */
+static int touch_syncbox(struct syna_tcm_hcd *tcm_hcd)
+{
+	if (!touch_hcd)
+		return 0;
+
+	switch (tcm_hcd->report.id) {
+	case REPORT_IDENTIFY:
+		/* Device reset: lift all previously reported objects */
+		touch_free_objects();
+		break;
+	case REPORT_TOUCH:
+		if (!touch_hcd->suspend_touch)
+			touch_report();
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+/* Asynchronous report dispatch: on an identify report signalling application
+ * mode, re-run identification and rebuild the input reporting setup.
+ */
+static int touch_asyncbox(struct syna_tcm_hcd *tcm_hcd)
+{
+	int retval;
+
+	if (!touch_hcd)
+		return 0;
+
+	switch (tcm_hcd->async_report_id) {
+	case REPORT_IDENTIFY:
+		if (tcm_hcd->id_info.mode != MODE_APPLICATION)
+			break;
+		retval = tcm_hcd->identify(tcm_hcd, false);
+		if (retval < 0) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+					"Failed to do identification\n");
+			return retval;
+		}
+		retval = touch_set_input_reporting();
+		if (retval < 0) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+					"Failed to set up input reporting\n");
+			return retval;
+		}
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+/* Reset callback: initialize on first reset, otherwise re-sync input
+ * reporting with the (possibly changed) application firmware.
+ */
+static int touch_reset(struct syna_tcm_hcd *tcm_hcd)
+{
+	int retval;
+
+	/* First reset before init: perform full initialization instead */
+	if (!touch_hcd) {
+		retval = touch_init(tcm_hcd);
+		return retval;
+	}
+
+	if (tcm_hcd->id_info.mode == MODE_APPLICATION) {
+		retval = touch_set_input_reporting();
+		if (retval < 0) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+					"Failed to set up input reporting\n");
+			return retval;
+		}
+	}
+
+	return 0;
+}
+
+/* Early-suspend callback: stop finger reporting (unless wakeup gestures are
+ * enabled) and lift any active objects before the display goes down.
+ */
+static int touch_early_suspend(struct syna_tcm_hcd *tcm_hcd)
+{
+	if (!touch_hcd)
+		return 0;
+
+#ifdef WAKEUP_GESTURE
+	touch_hcd->suspend_touch = false;
+#else
+	touch_hcd->suspend_touch = true;
+#endif
+
+	touch_free_objects();
+
+	return 0;
+}
+
+/* Suspend callback: lift active objects; with WAKEUP_GESTURE, arm the IRQ
+ * as a wake source and switch the device into wakeup-gesture mode.
+ */
+static int touch_suspend(struct syna_tcm_hcd *tcm_hcd)
+{
+#ifdef WAKEUP_GESTURE
+	int retval;
+#endif
+
+	if (!touch_hcd)
+		return 0;
+
+	touch_hcd->suspend_touch = true;
+
+	touch_free_objects();
+
+#ifdef WAKEUP_GESTURE
+	if (!touch_hcd->irq_wake) {
+		enable_irq_wake(tcm_hcd->irq);
+		touch_hcd->irq_wake = true;
+	}
+
+	/* Keep reports flowing so the double-tap gesture can be seen */
+	touch_hcd->suspend_touch = false;
+
+	retval = tcm_hcd->set_dynamic_config(tcm_hcd,
+			DC_IN_WAKEUP_GESTURE_MODE,
+			1);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to enable wakeup gesture mode\n");
+		return retval;
+	}
+#endif
+
+	return 0;
+}
+
+/* Resume callback: re-enable normal reporting; with WAKEUP_GESTURE, disarm
+ * the wake IRQ and take the device out of wakeup-gesture mode.
+ */
+static int touch_resume(struct syna_tcm_hcd *tcm_hcd)
+{
+#ifdef WAKEUP_GESTURE
+	int retval;
+#endif
+
+	if (!touch_hcd)
+		return 0;
+
+	touch_hcd->suspend_touch = false;
+
+#ifdef WAKEUP_GESTURE
+	if (touch_hcd->irq_wake) {
+		disable_irq_wake(tcm_hcd->irq);
+		touch_hcd->irq_wake = false;
+	}
+
+	retval = tcm_hcd->set_dynamic_config(tcm_hcd,
+			DC_IN_WAKEUP_GESTURE_MODE,
+			0);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to disable wakeup gesture mode\n");
+		return retval;
+	}
+#endif
+
+	return 0;
+}
+
+/* Callback table registered with the TCM core for the touch module */
+static struct syna_tcm_module_cb touch_module = {
+	.type = TCM_TOUCH,
+	.init = touch_init,
+	.remove = touch_remove,
+	.syncbox = touch_syncbox,
+	.asyncbox = touch_asyncbox,
+	.reset = touch_reset,
+	.suspend = touch_suspend,
+	.resume = touch_resume,
+	.early_suspend = touch_early_suspend,
+};
+
+/* Register the touch module with the TCM core (true = add) */
+static int __init touch_module_init(void)
+{
+	return syna_tcm_add_module(&touch_module, true);
+}
+
+/* Deregister the module (false = remove) and wait for touch_remove() */
+static void __exit touch_module_exit(void)
+{
+	syna_tcm_add_module(&touch_module, false);
+
+	wait_for_completion(&touch_remove_complete);
+}
+
+module_init(touch_module_init);
+module_exit(touch_module_exit);
+
+MODULE_AUTHOR("Synaptics, Inc.");
+MODULE_DESCRIPTION("Synaptics TCM Touch Module");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/touchscreen/synaptics_tcm/synaptics_tcm_zeroflash.c b/drivers/input/touchscreen/synaptics_tcm/synaptics_tcm_zeroflash.c
new file mode 100644
index 0000000..5e82954
--- /dev/null
+++ b/drivers/input/touchscreen/synaptics_tcm/synaptics_tcm_zeroflash.c
@@ -0,0 +1,1012 @@
+/*
+ * Synaptics TCM touchscreen driver
+ *
+ * Copyright (C) 2017-2019 Synaptics Incorporated. All rights reserved.
+ *
+ * Copyright (C) 2017-2019 Scott Lin <scott.lin@tw.synaptics.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * INFORMATION CONTAINED IN THIS DOCUMENT IS PROVIDED "AS-IS," AND SYNAPTICS
+ * EXPRESSLY DISCLAIMS ALL EXPRESS AND IMPLIED WARRANTIES, INCLUDING ANY
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE,
+ * AND ANY WARRANTIES OF NON-INFRINGEMENT OF ANY INTELLECTUAL PROPERTY RIGHTS.
+ * IN NO EVENT SHALL SYNAPTICS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, PUNITIVE, OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OF THE INFORMATION CONTAINED IN THIS DOCUMENT, HOWEVER CAUSED
+ * AND BASED ON ANY THEORY OF LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHER TORTIOUS ACTION, AND EVEN IF SYNAPTICS WAS ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE. IF A TRIBUNAL OF COMPETENT JURISDICTION DOES
+ * NOT PERMIT THE DISCLAIMER OF DIRECT DAMAGES OR ANY OTHER DAMAGES, SYNAPTICS'
+ * TOTAL CUMULATIVE LIABILITY TO ANY PARTY SHALL NOT EXCEED ONE HUNDRED U.S.
+ * DOLLARS.
+ */
+
+#include <linux/gpio.h>
+#include <linux/crc32.h>
+#include <linux/firmware.h>
+#include "synaptics_tcm_core.h"
+
+#define FW_IMAGE_NAME "synaptics/hdl_firmware.img"
+
+#define BOOT_CONFIG_ID "BOOT_CONFIG"
+
+#define F35_APP_CODE_ID "F35_APP_CODE"
+
+#define APP_CONFIG_ID "APP_CONFIG"
+
+#define DISP_CONFIG_ID "DISPLAY"
+
+#define IMAGE_FILE_MAGIC_VALUE 0x4818472b
+
+#define FLASH_AREA_MAGIC_VALUE 0x7c05e516
+
+#define PDT_START_ADDR 0x00e9
+
+#define PDT_END_ADDR 0x00ee
+
+#define UBL_FN_NUMBER 0x35
+
+#define F35_CTRL3_OFFSET 18
+
+#define F35_CTRL7_OFFSET 22
+
+#define F35_WRITE_FW_TO_PMEM_COMMAND 4
+
+#define RESET_TO_HDL_DELAY_MS 12
+
+#define DOWNLOAD_RETRY_COUNT 10
+
+/* Error codes reported by the F$35 micro-bootloader */
+enum f35_error_code {
+	SUCCESS = 0,
+	UNKNOWN_FLASH_PRESENT,
+	MAGIC_NUMBER_NOT_PRESENT,
+	INVALID_BLOCK_NUMBER,
+	BLOCK_NOT_ERASED,
+	NO_FLASH_PRESENT,
+	CHECKSUM_FAILURE,
+	WRITE_FAILURE,
+	INVALID_COMMAND,
+	IN_DEBUG_MODE,
+	INVALID_HEADER,
+	REQUESTING_FIRMWARE,
+	INVALID_CONFIGURATION,
+	DISABLE_BLOCK_PROTECT_FAILURE,
+};
+
+/* Config type selector for the host-download (HDL) config command */
+enum config_download {
+	HDL_INVALID = 0,
+	HDL_TOUCH_CONFIG,
+	HDL_DISPLAY_CONFIG,
+	HDL_DISPLAY_CONFIG_TO_RAM,
+};
+
+/* On-disk flash area descriptor inside the firmware image (little-endian) */
+struct area_descriptor {
+	unsigned char magic_value[4];
+	unsigned char id_string[16];
+	unsigned char flags[4];
+	unsigned char flash_addr_words[4];
+	unsigned char length[4];
+	unsigned char checksum[4];
+};
+
+/* Parsed location of one firmware area; data points into the image blob */
+struct block_data {
+	const unsigned char *data;
+	unsigned int size;
+	unsigned int flash_addr;
+};
+
+/* Result of parsing the firmware image: one block per downloadable area */
+struct image_info {
+	unsigned int packrat_number;
+	struct block_data boot_config;
+	struct block_data app_firmware;
+	struct block_data app_config;
+	struct block_data disp_config;
+};
+
+/* On-disk firmware image header (little-endian) */
+struct image_header {
+	unsigned char magic_value[4];
+	unsigned char num_of_areas[4];
+};
+
+/* F$35 query register layout (bitfields mirror the device register map) */
+struct rmi_f35_query {
+	unsigned char version:4;
+	unsigned char has_debug_mode:1;
+	unsigned char has_data5:1;
+	unsigned char has_query1:1;
+	unsigned char has_query2:1;
+	unsigned char chunk_size;
+	unsigned char has_ctrl7:1;
+	unsigned char has_host_download:1;
+	unsigned char has_spi_master:1;
+	unsigned char advanced_recovery_mode:1;
+	unsigned char reserved:4;
+} __packed;
+
+/* F$35 data register layout: bootloader/recovery status */
+struct rmi_f35_data {
+	unsigned char error_code:5;
+	unsigned char recovery_mode_forced:1;
+	unsigned char nvm_programmed:1;
+	unsigned char in_recovery:1;
+} __packed;
+
+/* One RMI page description table entry */
+struct rmi_pdt_entry {
+	unsigned char query_base_addr;
+	unsigned char command_base_addr;
+	unsigned char control_base_addr;
+	unsigned char data_base_addr;
+	unsigned char intr_src_count:3;
+	unsigned char reserved_1:2;
+	unsigned char fn_version:2;
+	unsigned char reserved_2:1;
+	unsigned char fn_number;
+} __packed;
+
+/* Cached base addresses of the F$35 function discovered via the PDT */
+struct rmi_addr {
+	unsigned short query_base;
+	unsigned short command_base;
+	unsigned short control_base;
+	unsigned short data_base;
+};
+
+/* Firmware status word returned by the HDL firmware after download */
+struct firmware_status {
+	unsigned short invalid_static_config:1;
+	unsigned short need_disp_config:1;
+	unsigned short need_app_config:1;
+	unsigned short hdl_version:4;
+	unsigned short reserved:9;
+} __packed;
+
+/* Handler context for zero-flash (host download) firmware provisioning */
+struct zeroflash_hcd {
+	bool has_hdl;			/* device supports host download */
+	bool f35_ready;			/* F$35 addresses already discovered */
+	const unsigned char *image;	/* raw firmware blob (fw_entry->data) */
+	unsigned char *buf;
+	const struct firmware *fw_entry;
+	struct work_struct config_work;
+	struct work_struct firmware_work;
+	struct workqueue_struct *workqueue;
+	struct rmi_addr f35_addr;
+	struct image_info image_info;	/* parsed areas of the image */
+	struct firmware_status fw_status;
+	struct syna_tcm_buffer out;
+	struct syna_tcm_buffer resp;
+	struct syna_tcm_hcd *tcm_hcd;
+};
+
+DECLARE_COMPLETION(zeroflash_remove_complete);
+
+static struct zeroflash_hcd *zeroflash_hcd;
+
+/* Verify the device is in the F$35 micro-bootloader and that it supports
+ * host download; cache the F$35 register base addresses on first success.
+ * Returns 0 when host download is available, negative errno otherwise.
+ */
+static int zeroflash_check_uboot(void)
+{
+	int retval;
+	unsigned char fn_number;
+	struct rmi_f35_query query;
+	struct rmi_pdt_entry p_entry;
+	struct syna_tcm_hcd *tcm_hcd = zeroflash_hcd->tcm_hcd;
+
+	retval = syna_tcm_rmi_read(tcm_hcd,
+			PDT_END_ADDR,
+			&fn_number,
+			sizeof(fn_number));
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to read RMI function number\n");
+		return retval;
+	}
+
+	LOGD(tcm_hcd->pdev->dev.parent,
+			"Found F$%02x\n",
+			fn_number);
+
+	if (fn_number != UBL_FN_NUMBER) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to find F$35\n");
+		return -ENODEV;
+	}
+
+	/* Base addresses and capabilities only need to be read once */
+	if (zeroflash_hcd->f35_ready)
+		return 0;
+
+	retval = syna_tcm_rmi_read(tcm_hcd,
+			PDT_START_ADDR,
+			(unsigned char *)&p_entry,
+			sizeof(p_entry));
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to read PDT entry\n");
+		return retval;
+	}
+
+	zeroflash_hcd->f35_addr.query_base = p_entry.query_base_addr;
+	zeroflash_hcd->f35_addr.command_base = p_entry.command_base_addr;
+	zeroflash_hcd->f35_addr.control_base = p_entry.control_base_addr;
+	zeroflash_hcd->f35_addr.data_base = p_entry.data_base_addr;
+
+	retval = syna_tcm_rmi_read(tcm_hcd,
+			zeroflash_hcd->f35_addr.query_base,
+			(unsigned char *)&query,
+			sizeof(query));
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to read F$35 query\n");
+		return retval;
+	}
+
+	zeroflash_hcd->f35_ready = true;
+
+	if (query.has_query2 && query.has_ctrl7 && query.has_host_download) {
+		zeroflash_hcd->has_hdl = true;
+	} else {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Host download not supported\n");
+		zeroflash_hcd->has_hdl = false;
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+/* Parse the host-download firmware image into zeroflash_hcd->image_info.
+ *
+ * Walks the area descriptor table, verifies each area's CRC32, and records
+ * pointers into the image blob for each known area type. All descriptor
+ * offsets and area lengths are validated against the size of the firmware
+ * blob to avoid out-of-bounds reads on a corrupted or truncated image.
+ *
+ * Returns 0 on success, -EINVAL on a malformed image.
+ */
+static int zeroflash_parse_fw_image(void)
+{
+	unsigned int idx;
+	unsigned int addr;
+	unsigned int offset;
+	unsigned int length;
+	unsigned int checksum;
+	unsigned int fw_size;
+	unsigned int flash_addr;
+	unsigned int magic_value;
+	unsigned int num_of_areas;
+	struct image_header *header;
+	struct image_info *image_info;
+	struct area_descriptor *descriptor;
+	struct syna_tcm_hcd *tcm_hcd = zeroflash_hcd->tcm_hcd;
+	const unsigned char *image;
+	const unsigned char *content;
+
+	image = zeroflash_hcd->image;
+	/* fw_entry is set before image in zeroflash_get_fw_image() */
+	fw_size = (unsigned int)zeroflash_hcd->fw_entry->size;
+	image_info = &zeroflash_hcd->image_info;
+
+	if (fw_size < sizeof(*header)) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Firmware image too small\n");
+		return -EINVAL;
+	}
+	header = (struct image_header *)image;
+
+	magic_value = le4_to_uint(header->magic_value);
+	if (magic_value != IMAGE_FILE_MAGIC_VALUE) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Invalid image file magic value\n");
+		return -EINVAL;
+	}
+
+	memset(image_info, 0x00, sizeof(*image_info));
+
+	offset = sizeof(*header);
+	num_of_areas = le4_to_uint(header->num_of_areas);
+
+	for (idx = 0; idx < num_of_areas; idx++) {
+		/* The 4-byte descriptor offset must lie inside the image */
+		if (offset > fw_size - 4) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+					"Truncated area descriptor table\n");
+			return -EINVAL;
+		}
+		addr = le4_to_uint(image + offset);
+		offset += 4;
+
+		/* The descriptor itself must lie fully inside the image */
+		if (addr > fw_size || fw_size - addr < sizeof(*descriptor)) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+					"Area descriptor out of bounds\n");
+			return -EINVAL;
+		}
+		descriptor = (struct area_descriptor *)(image + addr);
+
+		magic_value = le4_to_uint(descriptor->magic_value);
+		if (magic_value != FLASH_AREA_MAGIC_VALUE)
+			continue;
+
+		length = le4_to_uint(descriptor->length);
+		/* Area content must not extend past the end of the image */
+		if (length > fw_size - addr - sizeof(*descriptor)) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+					"Area content out of bounds\n");
+			return -EINVAL;
+		}
+		content = (unsigned char *)descriptor + sizeof(*descriptor);
+		flash_addr = le4_to_uint(descriptor->flash_addr_words) * 2;
+		checksum = le4_to_uint(descriptor->checksum);
+
+		if (!memcmp((char *)descriptor->id_string,
+				BOOT_CONFIG_ID,
+				strlen(BOOT_CONFIG_ID))) {
+			if (checksum != (crc32(~0, content, length) ^ ~0)) {
+				LOGE(tcm_hcd->pdev->dev.parent,
+						"Boot config checksum error\n");
+				return -EINVAL;
+			}
+			image_info->boot_config.size = length;
+			image_info->boot_config.data = content;
+			image_info->boot_config.flash_addr = flash_addr;
+			LOGD(tcm_hcd->pdev->dev.parent,
+					"Boot config size = %d\n",
+					length);
+			LOGD(tcm_hcd->pdev->dev.parent,
+					"Boot config flash address = 0x%08x\n",
+					flash_addr);
+		} else if (!memcmp((char *)descriptor->id_string,
+				F35_APP_CODE_ID,
+				strlen(F35_APP_CODE_ID))) {
+			if (checksum != (crc32(~0, content, length) ^ ~0)) {
+				LOGE(tcm_hcd->pdev->dev.parent,
+					"APP firmware checksum error\n");
+				return -EINVAL;
+			}
+			image_info->app_firmware.size = length;
+			image_info->app_firmware.data = content;
+			image_info->app_firmware.flash_addr = flash_addr;
+			LOGD(tcm_hcd->pdev->dev.parent,
+					"Application firmware size = %d\n",
+					length);
+			LOGD(tcm_hcd->pdev->dev.parent,
+				"Application firmware flash address = 0x%08x\n",
+				flash_addr);
+		} else if (!memcmp((char *)descriptor->id_string,
+				APP_CONFIG_ID,
+				strlen(APP_CONFIG_ID))) {
+			if (checksum != (crc32(~0, content, length) ^ ~0)) {
+				LOGE(tcm_hcd->pdev->dev.parent,
+					"Application config checksum error\n");
+				return -EINVAL;
+			}
+			/* Packrat number lives at offset 14 of the config */
+			if (length < 18) {
+				LOGE(tcm_hcd->pdev->dev.parent,
+					"Application config too small\n");
+				return -EINVAL;
+			}
+			image_info->app_config.size = length;
+			image_info->app_config.data = content;
+			image_info->app_config.flash_addr = flash_addr;
+			image_info->packrat_number = le4_to_uint(&content[14]);
+			LOGD(tcm_hcd->pdev->dev.parent,
+				"Application config size = %d\n",
+				length);
+			LOGD(tcm_hcd->pdev->dev.parent,
+				"Application config flash address = 0x%08x\n",
+				flash_addr);
+		} else if (!memcmp((char *)descriptor->id_string,
+				DISP_CONFIG_ID,
+				strlen(DISP_CONFIG_ID))) {
+			if (checksum != (crc32(~0, content, length) ^ ~0)) {
+				LOGE(tcm_hcd->pdev->dev.parent,
+					"Display config checksum error\n");
+				return -EINVAL;
+			}
+			image_info->disp_config.size = length;
+			image_info->disp_config.data = content;
+			image_info->disp_config.flash_addr = flash_addr;
+			LOGD(tcm_hcd->pdev->dev.parent,
+				"Display config size = %d\n",
+				length);
+			LOGD(tcm_hcd->pdev->dev.parent,
+				"Display config flash address = 0x%08x\n",
+				flash_addr);
+		}
+	}
+
+	return 0;
+}
+
+/* Request the HDL firmware image from userspace and parse it.
+ *
+ * NOTE(review): the request loop retries forever with a 100 ms sleep; if the
+ * image is never provisioned this blocks the calling work item indefinitely —
+ * consider a bounded retry count. Confirm this is intentional for HDL parts.
+ */
+static int zeroflash_get_fw_image(void)
+{
+	int retval;
+	struct syna_tcm_hcd *tcm_hcd = zeroflash_hcd->tcm_hcd;
+
+	/* Image already requested and parsed */
+	if (zeroflash_hcd->fw_entry != NULL)
+		return 0;
+
+	do {
+		retval = request_firmware(&zeroflash_hcd->fw_entry,
+				FW_IMAGE_NAME,
+				tcm_hcd->pdev->dev.parent);
+		if (retval < 0) {
+			LOGD(tcm_hcd->pdev->dev.parent,
+					"Failed to request %s\n",
+					FW_IMAGE_NAME);
+			msleep(100);
+		} else {
+			break;
+		}
+	} while (1);
+
+	LOGD(tcm_hcd->pdev->dev.parent,
+			"Firmware image size = %d\n",
+			(unsigned int)zeroflash_hcd->fw_entry->size);
+
+	zeroflash_hcd->image = zeroflash_hcd->fw_entry->data;
+
+	retval = zeroflash_parse_fw_image();
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to parse firmware image\n");
+		/* Drop the bad image so the next attempt re-requests it */
+		release_firmware(zeroflash_hcd->fw_entry);
+		zeroflash_hcd->fw_entry = NULL;
+		zeroflash_hcd->image = NULL;
+		return retval;
+	}
+
+	return 0;
+}
+
+/* Queue config download work if the firmware still needs a config; otherwise
+ * finish host download and notify the core of the completed reset.
+ */
+static void zeroflash_download_config(void)
+{
+	struct firmware_status *fw_status;
+	struct syna_tcm_hcd *tcm_hcd = zeroflash_hcd->tcm_hcd;
+
+	fw_status = &zeroflash_hcd->fw_status;
+
+	if (!fw_status->need_app_config && !fw_status->need_disp_config) {
+		/* Nothing left to download: signal reset via the helper */
+		if (atomic_read(&tcm_hcd->helper.task) == HELP_NONE) {
+			atomic_set(&tcm_hcd->helper.task,
+					HELP_SEND_RESET_NOTIFICATION);
+			queue_work(tcm_hcd->helper.workqueue,
+					&tcm_hcd->helper.work);
+		}
+		atomic_set(&tcm_hcd->host_downloading, 0);
+		return;
+	}
+
+	queue_work(zeroflash_hcd->workqueue, &zeroflash_hcd->config_work);
+}
+
+/* Queue the firmware download work item on the zeroflash workqueue */
+static void zeroflash_download_firmware(void)
+{
+	queue_work(zeroflash_hcd->workqueue, &zeroflash_hcd->firmware_work);
+}
+
+/* Download the display config area of the firmware image to the device.
+ *
+ * Retries (up to DOWNLOAD_RETRY_COUNT) when the device answers the download
+ * command with STATUS_ERROR; on success the firmware status word returned in
+ * the response is cached in zeroflash_hcd->fw_status.
+ *
+ * Returns 0 on success, negative errno on failure.
+ */
+static int zeroflash_download_disp_config(void)
+{
+	int retval;
+	/* Initialize: write_message() may fail before filling in the response
+	 * code, and the error path below reads it. 0 is not STATUS_ERROR, so
+	 * a transport failure does not spuriously enter the retry path.
+	 */
+	unsigned char response_code = 0;
+	struct image_info *image_info;
+	struct syna_tcm_hcd *tcm_hcd = zeroflash_hcd->tcm_hcd;
+	static unsigned int retry_count;
+
+	LOGN(tcm_hcd->pdev->dev.parent,
+			"Downloading display config\n");
+
+	image_info = &zeroflash_hcd->image_info;
+
+	if (image_info->disp_config.size == 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"No display config in image file\n");
+		return -EINVAL;
+	}
+
+	LOCK_BUFFER(zeroflash_hcd->out);
+
+	/* Payload is 2 header bytes followed by the config data */
+	retval = syna_tcm_alloc_mem(tcm_hcd,
+			&zeroflash_hcd->out,
+			image_info->disp_config.size + 2);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+			"Failed to allocate memory for out.buf\n");
+		goto unlock_out;
+	}
+
+	/* First byte encodes the protocol revision expected by the device */
+	switch (zeroflash_hcd->fw_status.hdl_version) {
+	case 0:
+		zeroflash_hcd->out.buf[0] = 1;
+		break;
+	case 1:
+		zeroflash_hcd->out.buf[0] = 2;
+		break;
+	default:
+		retval = -EINVAL;
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Invalid HDL version (%d)\n",
+				zeroflash_hcd->fw_status.hdl_version);
+		goto unlock_out;
+	}
+
+	zeroflash_hcd->out.buf[1] = HDL_DISPLAY_CONFIG;
+
+	retval = secure_memcpy(&zeroflash_hcd->out.buf[2],
+			zeroflash_hcd->out.buf_size - 2,
+			image_info->disp_config.data,
+			image_info->disp_config.size,
+			image_info->disp_config.size);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to copy display config data\n");
+		goto unlock_out;
+	}
+
+	zeroflash_hcd->out.data_length = image_info->disp_config.size + 2;
+
+	LOCK_BUFFER(zeroflash_hcd->resp);
+
+	retval = tcm_hcd->write_message(tcm_hcd,
+			CMD_DOWNLOAD_CONFIG,
+			zeroflash_hcd->out.buf,
+			zeroflash_hcd->out.data_length,
+			&zeroflash_hcd->resp.buf,
+			&zeroflash_hcd->resp.buf_size,
+			&zeroflash_hcd->resp.data_length,
+			&response_code,
+			0);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to write command %s\n",
+				STR(CMD_DOWNLOAD_CONFIG));
+		/* Only a device-reported STATUS_ERROR is retried */
+		if (response_code != STATUS_ERROR)
+			goto unlock_resp;
+		retry_count++;
+		if (DOWNLOAD_RETRY_COUNT && retry_count > DOWNLOAD_RETRY_COUNT)
+			goto unlock_resp;
+	} else {
+		retry_count = 0;
+	}
+
+	retval = secure_memcpy((unsigned char *)&zeroflash_hcd->fw_status,
+			sizeof(zeroflash_hcd->fw_status),
+			zeroflash_hcd->resp.buf,
+			zeroflash_hcd->resp.buf_size,
+			sizeof(zeroflash_hcd->fw_status));
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to copy firmware status\n");
+		goto unlock_resp;
+	}
+
+	LOGN(tcm_hcd->pdev->dev.parent,
+			"Display config downloaded\n");
+
+	retval = 0;
+
+unlock_resp:
+	UNLOCK_BUFFER(zeroflash_hcd->resp);
+
+unlock_out:
+	UNLOCK_BUFFER(zeroflash_hcd->out);
+
+	return retval;
+}
+
+static int zeroflash_download_app_config(void)
+{
+	int retval;
+	unsigned char padding;
+	unsigned char response_code;
+	struct image_info *image_info;
+	struct syna_tcm_hcd *tcm_hcd = zeroflash_hcd->tcm_hcd;
+	static unsigned int retry_count;
+
+	LOGN(tcm_hcd->pdev->dev.parent,
+			"Downloading application config\n");
+
+	image_info = &zeroflash_hcd->image_info;
+
+	if (image_info->app_config.size == 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"No application config in image file\n");
+		return -EINVAL;
+	}
+
+	padding = image_info->app_config.size % 8;
+	if (padding)
+		padding = 8 - padding;
+
+	LOCK_BUFFER(zeroflash_hcd->out);
+
+	retval = syna_tcm_alloc_mem(tcm_hcd,
+			&zeroflash_hcd->out,
+			image_info->app_config.size + 2 + padding);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+			"Failed to allocate memory for out.buf\n");
+		goto unlock_out;
+	}
+
+	switch (zeroflash_hcd->fw_status.hdl_version) {
+	case 0:
+		zeroflash_hcd->out.buf[0] = 1;
+		break;
+	case 1:
+		zeroflash_hcd->out.buf[0] = 2;
+		break;
+	default:
+		retval = -EINVAL;
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Invalid HDL version (%d)\n",
+				zeroflash_hcd->fw_status.hdl_version);
+		goto unlock_out;
+	}
+
+	zeroflash_hcd->out.buf[1] = HDL_TOUCH_CONFIG;
+
+	retval = secure_memcpy(&zeroflash_hcd->out.buf[2],
+			zeroflash_hcd->out.buf_size - 2,
+			image_info->app_config.data,
+			image_info->app_config.size,
+			image_info->app_config.size);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to copy application config data\n");
+		goto unlock_out;
+	}
+
+	zeroflash_hcd->out.data_length = image_info->app_config.size + 2;
+	zeroflash_hcd->out.data_length += padding;
+
+	LOCK_BUFFER(zeroflash_hcd->resp);
+
+	retval = tcm_hcd->write_message(tcm_hcd,
+			CMD_DOWNLOAD_CONFIG,
+			zeroflash_hcd->out.buf,
+			zeroflash_hcd->out.data_length,
+			&zeroflash_hcd->resp.buf,
+			&zeroflash_hcd->resp.buf_size,
+			&zeroflash_hcd->resp.data_length,
+			&response_code,
+			0);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to write command %s\n",
+				STR(CMD_DOWNLOAD_CONFIG));
+		if (response_code != STATUS_ERROR)
+			goto unlock_resp;
+		retry_count++;
+		if (DOWNLOAD_RETRY_COUNT && retry_count > DOWNLOAD_RETRY_COUNT)
+			goto unlock_resp;
+	} else {
+		retry_count = 0;
+	}
+
+	retval = secure_memcpy((unsigned char *)&zeroflash_hcd->fw_status,
+			sizeof(zeroflash_hcd->fw_status),
+			zeroflash_hcd->resp.buf,
+			zeroflash_hcd->resp.buf_size,
+			sizeof(zeroflash_hcd->fw_status));
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to copy firmware status\n");
+		goto unlock_resp;
+	}
+
+	LOGN(tcm_hcd->pdev->dev.parent,
+			"Application config downloaded\n");
+
+	retval = 0;
+
+unlock_resp:
+	UNLOCK_BUFFER(zeroflash_hcd->resp);
+
+unlock_out:
+	UNLOCK_BUFFER(zeroflash_hcd->out);
+
+	return retval;
+}
+
+static void zeroflash_download_config_work(struct work_struct *work)
+{
+	int retval;
+	struct syna_tcm_hcd *tcm_hcd = zeroflash_hcd->tcm_hcd;
+
+	retval = zeroflash_get_fw_image();
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to get firmware image\n");
+		return;
+	}
+
+	LOGN(tcm_hcd->pdev->dev.parent,
+			"Start of config download\n");
+
+	if (zeroflash_hcd->fw_status.need_app_config) {
+		retval = zeroflash_download_app_config();
+		if (retval < 0) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to download application config\n");
+			return;
+		}
+		goto exit;
+	}
+
+	if (zeroflash_hcd->fw_status.need_disp_config) {
+		retval = zeroflash_download_disp_config();
+		if (retval < 0) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to download display config\n");
+			return;
+		}
+		goto exit;
+	}
+
+exit:
+	LOGN(tcm_hcd->pdev->dev.parent,
+			"End of config download\n");
+
+	zeroflash_download_config();
+}
+
+static int zeroflash_download_app_fw(void)
+{
+	int retval;
+	unsigned char command;
+	struct image_info *image_info;
+	struct syna_tcm_hcd *tcm_hcd = zeroflash_hcd->tcm_hcd;
+#if RESET_TO_HDL_DELAY_MS
+	const struct syna_tcm_board_data *bdata = tcm_hcd->hw_if->bdata;
+#endif
+
+	LOGN(tcm_hcd->pdev->dev.parent,
+			"Downloading application firmware\n");
+
+	image_info = &zeroflash_hcd->image_info;
+
+	if (image_info->app_firmware.size == 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"No application firmware in image file\n");
+		return -EINVAL;
+	}
+
+	LOCK_BUFFER(zeroflash_hcd->out);
+
+	retval = syna_tcm_alloc_mem(tcm_hcd,
+			&zeroflash_hcd->out,
+			image_info->app_firmware.size);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+			"Failed to allocate memory for out.buf\n");
+		UNLOCK_BUFFER(zeroflash_hcd->out);
+		return retval;
+	}
+
+	retval = secure_memcpy(zeroflash_hcd->out.buf,
+			zeroflash_hcd->out.buf_size,
+			image_info->app_firmware.data,
+			image_info->app_firmware.size,
+			image_info->app_firmware.size);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to copy application firmware data\n");
+		UNLOCK_BUFFER(zeroflash_hcd->out);
+		return retval;
+	}
+
+	zeroflash_hcd->out.data_length = image_info->app_firmware.size;
+
+	command = F35_WRITE_FW_TO_PMEM_COMMAND;
+
+#if RESET_TO_HDL_DELAY_MS
+	gpio_set_value(bdata->reset_gpio, bdata->reset_on_state);
+	msleep(bdata->reset_active_ms);
+	gpio_set_value(bdata->reset_gpio, !bdata->reset_on_state);
+	msleep(RESET_TO_HDL_DELAY_MS);
+#endif
+
+	retval = syna_tcm_rmi_write(tcm_hcd,
+			zeroflash_hcd->f35_addr.control_base + F35_CTRL3_OFFSET,
+			&command,
+			sizeof(command));
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to write F$35 command\n");
+		UNLOCK_BUFFER(zeroflash_hcd->out);
+		return retval;
+	}
+
+	retval = syna_tcm_rmi_write(tcm_hcd,
+			zeroflash_hcd->f35_addr.control_base + F35_CTRL7_OFFSET,
+			zeroflash_hcd->out.buf,
+			zeroflash_hcd->out.data_length);
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to write application firmware data\n");
+		UNLOCK_BUFFER(zeroflash_hcd->out);
+		return retval;
+	}
+
+	UNLOCK_BUFFER(zeroflash_hcd->out);
+
+	LOGN(tcm_hcd->pdev->dev.parent,
+			"Application firmware downloaded\n");
+
+	return 0;
+}
+
+static void zeroflash_download_firmware_work(struct work_struct *work)
+{
+	int retval;
+	struct rmi_f35_data data;
+	struct syna_tcm_hcd *tcm_hcd = zeroflash_hcd->tcm_hcd;
+	static unsigned int retry_count;
+
+	retval = zeroflash_check_uboot();
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Microbootloader support unavailable\n");
+		goto exit;
+	}
+
+	atomic_set(&tcm_hcd->host_downloading, 1);
+
+	retval = syna_tcm_rmi_read(tcm_hcd,
+			zeroflash_hcd->f35_addr.data_base,
+			(unsigned char *)&data,
+			sizeof(data));
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to read F$35 data\n");
+		goto exit;
+	}
+
+	if (data.error_code != REQUESTING_FIRMWARE) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Microbootloader error code = 0x%02x\n",
+				data.error_code);
+		if (data.error_code != CHECKSUM_FAILURE) {
+			retval = -EIO;
+			goto exit;
+		} else {
+			retry_count++;
+		}
+	} else {
+		retry_count = 0;
+	}
+
+	retval = zeroflash_get_fw_image();
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to get firmware image\n");
+		goto exit;
+	}
+
+	LOGN(tcm_hcd->pdev->dev.parent,
+			"Start of firmware download\n");
+
+	retval = zeroflash_download_app_fw();
+	if (retval < 0) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+				"Failed to download application firmware\n");
+		goto exit;
+	}
+
+	LOGN(tcm_hcd->pdev->dev.parent,
+			"End of firmware download\n");
+
+exit:
+	if (retval < 0)
+		retry_count++;
+
+	if (DOWNLOAD_RETRY_COUNT && retry_count > DOWNLOAD_RETRY_COUNT) {
+		retval = tcm_hcd->enable_irq(tcm_hcd, false, true);
+		if (retval < 0) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+					"Failed to disable interrupt\n");
+		}
+	} else {
+		retval = tcm_hcd->enable_irq(tcm_hcd, true, NULL);
+		if (retval < 0) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+					"Failed to enable interrupt\n");
+		}
+	}
+}
+
+static int zeroflash_init(struct syna_tcm_hcd *tcm_hcd)
+{
+	zeroflash_hcd = kzalloc(sizeof(*zeroflash_hcd), GFP_KERNEL);
+	if (!zeroflash_hcd) {
+		LOGE(tcm_hcd->pdev->dev.parent,
+			"Failed to allocate memory for zeroflash_hcd\n");
+		return -ENOMEM;
+	}
+
+	zeroflash_hcd->tcm_hcd = tcm_hcd;
+
+	INIT_BUFFER(zeroflash_hcd->out, false);
+	INIT_BUFFER(zeroflash_hcd->resp, false);
+
+	zeroflash_hcd->workqueue =
+			create_singlethread_workqueue("syna_tcm_zeroflash");
+	INIT_WORK(&zeroflash_hcd->config_work,
+			zeroflash_download_config_work);
+	INIT_WORK(&zeroflash_hcd->firmware_work,
+			zeroflash_download_firmware_work);
+
+	if (tcm_hcd->init_okay == false &&
+			tcm_hcd->hw_if->bus_io->type == BUS_SPI)
+		zeroflash_download_firmware();
+
+	return 0;
+}
+
+static int zeroflash_remove(struct syna_tcm_hcd *tcm_hcd)
+{
+	if (!zeroflash_hcd)
+		goto exit;
+
+	if (zeroflash_hcd->fw_entry)
+		release_firmware(zeroflash_hcd->fw_entry);
+
+	cancel_work_sync(&zeroflash_hcd->config_work);
+	cancel_work_sync(&zeroflash_hcd->firmware_work);
+	flush_workqueue(zeroflash_hcd->workqueue);
+	destroy_workqueue(zeroflash_hcd->workqueue);
+
+	RELEASE_BUFFER(zeroflash_hcd->resp);
+	RELEASE_BUFFER(zeroflash_hcd->out);
+
+	kfree(zeroflash_hcd);
+	zeroflash_hcd = NULL;
+
+exit:
+	complete(&zeroflash_remove_complete);
+
+	return 0;
+}
+
+static int zeroflash_syncbox(struct syna_tcm_hcd *tcm_hcd)
+{
+	int retval;
+	unsigned char *fw_status;
+
+	if (!zeroflash_hcd)
+		return 0;
+
+	switch (tcm_hcd->report.id) {
+	case REPORT_STATUS:
+		fw_status = (unsigned char *)&zeroflash_hcd->fw_status;
+		retval = secure_memcpy(fw_status,
+				sizeof(zeroflash_hcd->fw_status),
+				tcm_hcd->report.buffer.buf,
+				tcm_hcd->report.buffer.buf_size,
+				sizeof(zeroflash_hcd->fw_status));
+		if (retval < 0) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+					"Failed to copy firmware status\n");
+			return retval;
+		}
+		zeroflash_download_config();
+		break;
+	case REPORT_HDL:
+		retval = tcm_hcd->enable_irq(tcm_hcd, false, true);
+		if (retval < 0) {
+			LOGE(tcm_hcd->pdev->dev.parent,
+					"Failed to disable interrupt\n");
+			return retval;
+		}
+		zeroflash_download_firmware();
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static int zeroflash_reset(struct syna_tcm_hcd *tcm_hcd)
+{
+	int retval;
+
+	if (!zeroflash_hcd) {
+		retval = zeroflash_init(tcm_hcd);
+		return retval;
+	}
+
+	return 0;
+}
+
+static struct syna_tcm_module_cb zeroflash_module = {
+	.type = TCM_ZEROFLASH,
+	.init = zeroflash_init,
+	.remove = zeroflash_remove,
+	.syncbox = zeroflash_syncbox,
+	.asyncbox = NULL,
+	.reset = zeroflash_reset,
+	.suspend = NULL,
+	.resume = NULL,
+	.early_suspend = NULL,
+};
+
+static int __init zeroflash_module_init(void)
+{
+	return syna_tcm_add_module(&zeroflash_module, true);
+}
+
+static void __exit zeroflash_module_exit(void)
+{
+	syna_tcm_add_module(&zeroflash_module, false);
+
+	wait_for_completion(&zeroflash_remove_complete);
+}
+
+module_init(zeroflash_module_init);
+module_exit(zeroflash_module_exit);
+
+MODULE_AUTHOR("Synaptics, Inc.");
+MODULE_DESCRIPTION("Synaptics TCM Zeroflash Module");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iommu/arm-smmu-regs.h b/drivers/iommu/arm-smmu-regs.h
index 0d9688a..3921487 100644
--- a/drivers/iommu/arm-smmu-regs.h
+++ b/drivers/iommu/arm-smmu-regs.h
@@ -259,4 +259,18 @@ enum arm_smmu_s2cr_privcfg {
 
 #define FSYNR0_WNR			(1 << 4)
 
+#define IMPL_DEF1_MICRO_MMU_CTRL	0
+#define MICRO_MMU_CTRL_LOCAL_HALT_REQ	(1 << 2)
+#define MICRO_MMU_CTRL_IDLE		(1 << 3)
+
+/* Definitions for implementation-defined registers */
+#define ACTLR_QCOM_OSH_SHIFT		28
+#define ACTLR_QCOM_OSH			1
+
+#define ACTLR_QCOM_ISH_SHIFT		29
+#define ACTLR_QCOM_ISH			1
+
+#define ACTLR_QCOM_NSH_SHIFT		30
+#define ACTLR_QCOM_NSH			1
+
 #endif /* _ARM_SMMU_REGS_H */
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 8456e14..ef2e6a2 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -189,6 +189,7 @@ struct arm_smmu_cb {
 	u32				tcr[2];
 	u32				mair[2];
 	struct arm_smmu_cfg		*cfg;
+	u32                             actlr;
 };
 
 struct arm_smmu_master_cfg {
@@ -4142,6 +4143,152 @@ static struct iommu_ops arm_smmu_ops = {
 	.iova_to_pte = arm_smmu_iova_to_pte,
 };
 
+static int qsmmuv2_wait_for_halt(struct arm_smmu_device *smmu)
+{
+	void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
+	u32 tmp;
+
+	if (readl_poll_timeout_atomic(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL,
+					tmp, (tmp & MICRO_MMU_CTRL_IDLE),
+					0, 30000)) {
+		dev_err(smmu->dev, "Couldn't halt SMMU!\n");
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+static int __qsmmuv2_halt(struct arm_smmu_device *smmu, bool wait)
+{
+	void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
+	u32 reg;
+
+	reg = readl_relaxed(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
+	reg |= MICRO_MMU_CTRL_LOCAL_HALT_REQ;
+
+	writel_relaxed(reg, impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
+
+	return wait ? qsmmuv2_wait_for_halt(smmu) : 0;
+}
+
+static int qsmmuv2_halt(struct arm_smmu_device *smmu)
+{
+	return __qsmmuv2_halt(smmu, true);
+}
+
+static int qsmmuv2_halt_nowait(struct arm_smmu_device *smmu)
+{
+	return __qsmmuv2_halt(smmu, false);
+}
+
+static void qsmmuv2_resume(struct arm_smmu_device *smmu)
+{
+	void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
+	u32 reg;
+
+	reg = readl_relaxed(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
+	reg &= ~MICRO_MMU_CTRL_LOCAL_HALT_REQ;
+
+	writel_relaxed(reg, impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
+}
+
+static void qsmmuv2_device_reset(struct arm_smmu_device *smmu)
+{
+	int i;
+	u32 val;
+	struct arm_smmu_impl_def_reg *regs = smmu->impl_def_attach_registers;
+	/*
+	 * SCTLR.M must be disabled here per ARM SMMUv2 spec
+	 * to prevent table walks with an inconsistent state.
+	 */
+	for (i = 0; i < smmu->num_context_banks; ++i) {
+		struct arm_smmu_cb *cb = &smmu->cbs[i];
+
+		val = ACTLR_QCOM_ISH << ACTLR_QCOM_ISH_SHIFT |
+		ACTLR_QCOM_OSH << ACTLR_QCOM_OSH_SHIFT |
+		ACTLR_QCOM_NSH << ACTLR_QCOM_NSH_SHIFT;
+		cb->actlr = val;
+	}
+
+	/* Program implementation defined registers */
+	qsmmuv2_halt(smmu);
+	for (i = 0; i < smmu->num_impl_def_attach_registers; ++i)
+		writel_relaxed(regs[i].value,
+			ARM_SMMU_GR0(smmu) + regs[i].offset);
+	qsmmuv2_resume(smmu);
+}
+
+static phys_addr_t qsmmuv2_iova_to_phys_hard(struct iommu_domain *domain,
+				dma_addr_t iova)
+{
+	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+	struct arm_smmu_device *smmu = smmu_domain->smmu;
+	int ret;
+	phys_addr_t phys = 0;
+	unsigned long flags;
+	u32 sctlr, sctlr_orig, fsr;
+	void __iomem *cb_base;
+
+	ret = arm_smmu_power_on(smmu_domain->smmu->pwr);
+	if (ret)
+		return ret;
+
+	spin_lock_irqsave(&smmu->atos_lock, flags);
+	cb_base = ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx);
+
+	qsmmuv2_halt_nowait(smmu);
+	writel_relaxed(RESUME_TERMINATE, cb_base + ARM_SMMU_CB_RESUME);
+	qsmmuv2_wait_for_halt(smmu);
+
+	/* clear FSR to allow ATOS to log any faults */
+	fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
+	writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
+
+	/* disable stall mode momentarily */
+	sctlr_orig = readl_relaxed(cb_base + ARM_SMMU_CB_SCTLR);
+	sctlr = sctlr_orig & ~SCTLR_CFCFG;
+	writel_relaxed(sctlr, cb_base + ARM_SMMU_CB_SCTLR);
+
+	phys = __arm_smmu_iova_to_phys_hard(domain, iova);
+
+	/* restore SCTLR */
+	writel_relaxed(sctlr_orig, cb_base + ARM_SMMU_CB_SCTLR);
+
+	qsmmuv2_resume(smmu);
+	spin_unlock_irqrestore(&smmu->atos_lock, flags);
+
+	arm_smmu_power_off(smmu_domain->smmu->pwr);
+	return phys;
+}
+
+static void qsmmuv2_init_cb(struct arm_smmu_domain *smmu_domain,
+				struct device *dev)
+{
+	struct arm_smmu_device *smmu = smmu_domain->smmu;
+	void __iomem *cb_base;
+	struct arm_smmu_cb *cb = &smmu->cbs[smmu_domain->cfg.cbndx];
+	const struct iommu_gather_ops *tlb;
+
+
+	tlb = smmu_domain->pgtbl_cfg.tlb;
+	cb_base = ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx);
+
+	writel_relaxed(cb->actlr, cb_base + ARM_SMMU_CB_ACTLR);
+
+	/*
+	 * Flush the context bank after modifying ACTLR to ensure there
+	 * are no cache entries with stale state
+	 */
+	tlb->tlb_flush_all(smmu_domain);
+}
+
+struct arm_smmu_arch_ops qsmmuv2_arch_ops = {
+	.device_reset = qsmmuv2_device_reset,
+	.iova_to_phys_hard = qsmmuv2_iova_to_phys_hard,
+	.init_context_bank = qsmmuv2_init_cb,
+};
+
+
 static void arm_smmu_context_bank_reset(struct arm_smmu_device *smmu)
 {
 	int i;
@@ -4801,7 +4948,7 @@ ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500, NULL);
 ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2, NULL);
 ARM_SMMU_MATCH_DATA(qcom_smmuv500, ARM_SMMU_V2, QCOM_SMMUV500,
 		    &qsmmuv500_arch_ops);
-ARM_SMMU_MATCH_DATA(qcom_smmuv2, ARM_SMMU_V2, QCOM_SMMUV2, NULL);
+ARM_SMMU_MATCH_DATA(qcom_smmuv2, ARM_SMMU_V2, QCOM_SMMUV2, &qsmmuv2_arch_ops);
 
 static const struct of_device_id arm_smmu_of_match[] = {
 	{ .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
diff --git a/drivers/irqchip/qcom-pdc.c b/drivers/irqchip/qcom-pdc.c
index 9a99062..eb0e8a1 100644
--- a/drivers/irqchip/qcom-pdc.c
+++ b/drivers/irqchip/qcom-pdc.c
@@ -22,7 +22,7 @@
 
 #define PDC_IPC_LOG_SZ		2
 
-#define PDC_MAX_IRQS		138
+#define PDC_MAX_IRQS		153
 #define PDC_MAX_GPIO_IRQS	256
 
 #define CLEAR_INTR(reg, intr)	(reg & ~(1 << intr))
@@ -498,3 +498,4 @@ static int qcom_pdc_init(struct device_node *node, struct device_node *parent)
 IRQCHIP_DECLARE(pdc_sdm845, "qcom,sdm845-pdc", qcom_pdc_init);
 IRQCHIP_DECLARE(pdc_kona,   "qcom,kona-pdc",   qcom_pdc_init);
 IRQCHIP_DECLARE(pdc_lito,   "qcom,lito-pdc",   qcom_pdc_init);
+IRQCHIP_DECLARE(pdc_lagoon,   "qcom,lagoon-pdc",   qcom_pdc_init);
diff --git a/drivers/media/platform/msm/npu/npu_common.h b/drivers/media/platform/msm/npu/npu_common.h
index ff9a7bd..30afa4a 100644
--- a/drivers/media/platform/msm/npu/npu_common.h
+++ b/drivers/media/platform/msm/npu/npu_common.h
@@ -109,8 +109,6 @@ struct npu_debugfs_ctx {
 	struct dentry *root;
 	uint32_t reg_off;
 	uint32_t reg_cnt;
-	char *buf;
-	size_t buf_len;
 	uint8_t *log_buf;
 	struct mutex log_lock;
 	uint32_t log_num_bytes_buffered;
@@ -119,6 +117,12 @@ struct npu_debugfs_ctx {
 	uint32_t log_buf_size;
 };
 
+struct npu_debugfs_reg_ctx {
+	char *buf;
+	size_t buf_len;
+	struct npu_device *npu_dev;
+};
+
 struct npu_mbox {
 	struct mbox_client client;
 	struct mbox_chan *chan;
diff --git a/drivers/media/platform/msm/npu/npu_debugfs.c b/drivers/media/platform/msm/npu/npu_debugfs.c
index 51866c8..26bac2e 100644
--- a/drivers/media/platform/msm/npu/npu_debugfs.c
+++ b/drivers/media/platform/msm/npu/npu_debugfs.c
@@ -25,6 +25,7 @@
  */
 static int npu_debug_open(struct inode *inode, struct file *file);
 static int npu_debug_release(struct inode *inode, struct file *file);
+static int npu_debug_reg_open(struct inode *inode, struct file *file);
 static int npu_debug_reg_release(struct inode *inode, struct file *file);
 static ssize_t npu_debug_reg_read(struct file *file,
 		char __user *user_buf, size_t count, loff_t *ppos);
@@ -44,7 +45,7 @@ static ssize_t npu_debug_ctrl_write(struct file *file,
 static struct npu_device *g_npu_dev;
 
 static const struct file_operations npu_reg_fops = {
-	.open = npu_debug_open,
+	.open = npu_debug_reg_open,
 	.release = npu_debug_reg_release,
 	.read = npu_debug_reg_read,
 };
@@ -87,16 +88,28 @@ static int npu_debug_release(struct inode *inode, struct file *file)
 	return 0;
 }
 
+static int npu_debug_reg_open(struct inode *inode, struct file *file)
+{
+	struct npu_debugfs_reg_ctx *reg_ctx;
+
+	reg_ctx = kzalloc(sizeof(*reg_ctx), GFP_KERNEL);
+	if (!reg_ctx)
+		return -ENOMEM;
+
+	/* non-seekable */
+	file->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
+	reg_ctx->npu_dev = inode->i_private;
+	file->private_data = reg_ctx;
+	return 0;
+}
+
 static int npu_debug_reg_release(struct inode *inode, struct file *file)
 {
-	struct npu_device *npu_dev = file->private_data;
-	struct npu_debugfs_ctx *debugfs;
+	struct npu_debugfs_reg_ctx *reg_ctx = file->private_data;
 
-	debugfs = &npu_dev->debugfs_ctx;
-
-	kfree(debugfs->buf);
-	debugfs->buf_len = 0;
-	debugfs->buf = NULL;
+	kfree(reg_ctx->buf);
+	kfree(reg_ctx);
+	file->private_data = NULL;
 	return 0;
 }
 
@@ -107,7 +120,8 @@ static int npu_debug_reg_release(struct inode *inode, struct file *file)
 static ssize_t npu_debug_reg_read(struct file *file,
 			char __user *user_buf, size_t count, loff_t *ppos)
 {
-	struct npu_device *npu_dev = file->private_data;
+	struct npu_debugfs_reg_ctx *reg_ctx = file->private_data;
+	struct npu_device *npu_dev = reg_ctx->npu_dev;
 	struct npu_debugfs_ctx *debugfs;
 	size_t len;
 
@@ -116,16 +130,16 @@ static ssize_t npu_debug_reg_read(struct file *file,
 	if (debugfs->reg_cnt == 0)
 		return 0;
 
-	if (!debugfs->buf) {
+	if (!reg_ctx->buf) {
 		char dump_buf[64];
 		char *ptr;
 		int cnt, tot, off;
 
-		debugfs->buf_len = sizeof(dump_buf) *
+		reg_ctx->buf_len = sizeof(dump_buf) *
 			DIV_ROUND_UP(debugfs->reg_cnt, ROW_BYTES);
-		debugfs->buf = kzalloc(debugfs->buf_len, GFP_KERNEL);
+		reg_ctx->buf = kzalloc(reg_ctx->buf_len, GFP_KERNEL);
 
-		if (!debugfs->buf)
+		if (!reg_ctx->buf)
 			return -ENOMEM;
 
 		ptr = npu_dev->core_io.base + debugfs->reg_off;
@@ -139,28 +153,28 @@ static ssize_t npu_debug_reg_read(struct file *file,
 			hex_dump_to_buffer(ptr, min(cnt, ROW_BYTES),
 					   ROW_BYTES, GROUP_BYTES, dump_buf,
 					   sizeof(dump_buf), false);
-			len = scnprintf(debugfs->buf + tot,
-				debugfs->buf_len - tot, "0x%08x: %s\n",
+			len = scnprintf(reg_ctx->buf + tot,
+				reg_ctx->buf_len - tot, "0x%08x: %s\n",
 				((int) (unsigned long) ptr) -
 				((int) (unsigned long) npu_dev->core_io.base),
 				dump_buf);
 
 			ptr += ROW_BYTES;
 			tot += len;
-			if (tot >= debugfs->buf_len)
+			if (tot >= reg_ctx->buf_len)
 				break;
 		}
 		npu_disable_core_power(npu_dev);
 
-		debugfs->buf_len = tot;
+		reg_ctx->buf_len = tot;
 	}
 
-	if (*ppos >= debugfs->buf_len)
+	if (*ppos >= reg_ctx->buf_len)
 		return 0; /* done reading */
 
-	len = min(count, debugfs->buf_len - (size_t) *ppos);
-	NPU_DBG("read %zi %zi\n", count, debugfs->buf_len - (size_t) *ppos);
-	if (copy_to_user(user_buf, debugfs->buf + *ppos, len)) {
+	len = min(count, reg_ctx->buf_len - (size_t) *ppos);
+	NPU_DBG("read %zi %zi\n", count, reg_ctx->buf_len - (size_t) *ppos);
+	if (copy_to_user(user_buf, reg_ctx->buf + *ppos, len)) {
 		NPU_ERR("failed to copy to user\n");
 		return -EFAULT;
 	}
diff --git a/drivers/media/platform/msm/npu/npu_dev.c b/drivers/media/platform/msm/npu/npu_dev.c
index dedb341..bf90c3b 100644
--- a/drivers/media/platform/msm/npu/npu_dev.c
+++ b/drivers/media/platform/msm/npu/npu_dev.c
@@ -106,8 +106,8 @@ static int npu_of_parse_pwrlevels(struct npu_device *npu_dev,
 static int npu_pwrctrl_init(struct npu_device *npu_dev);
 static int npu_probe(struct platform_device *pdev);
 static int npu_remove(struct platform_device *pdev);
-static int npu_suspend(struct platform_device *dev, pm_message_t state);
-static int npu_resume(struct platform_device *dev);
+static int npu_pm_suspend(struct device *dev);
+static int npu_pm_resume(struct device *dev);
 static int __init npu_init(void);
 static void __exit npu_exit(void);
 
@@ -185,17 +185,17 @@ static const struct of_device_id npu_dt_match[] = {
 	{}
 };
 
+static const struct dev_pm_ops npu_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(npu_pm_suspend, npu_pm_resume)
+};
+
 static struct platform_driver npu_driver = {
 	.probe = npu_probe,
 	.remove = npu_remove,
-#if defined(CONFIG_PM)
-	.suspend = npu_suspend,
-	.resume = npu_resume,
-#endif
 	.driver = {
 		.name = "msm_npu",
 		.of_match_table = npu_dt_match,
-		.pm = NULL,
+		.pm = &npu_pm_ops,
 	},
 };
 
@@ -2209,7 +2209,7 @@ static int npu_probe(struct platform_device *pdev)
 	npu_dev->pdev = pdev;
 	mutex_init(&npu_dev->dev_lock);
 
-	platform_set_drvdata(pdev, npu_dev);
+	dev_set_drvdata(&pdev->dev, npu_dev);
 	res = platform_get_resource_byname(pdev,
 		IORESOURCE_MEM, "core");
 	if (!res) {
@@ -2434,6 +2434,7 @@ static int npu_probe(struct platform_device *pdev)
 	unregister_chrdev_region(npu_dev->dev_num, 1);
 	npu_mbox_deinit(npu_dev);
 error_get_dev_num:
+	dev_set_drvdata(&pdev->dev, NULL);
 	return rc;
 }
 
@@ -2451,7 +2452,7 @@ static int npu_remove(struct platform_device *pdev)
 	device_destroy(npu_dev->class, npu_dev->dev_num);
 	class_destroy(npu_dev->class);
 	unregister_chrdev_region(npu_dev->dev_num, 1);
-	platform_set_drvdata(pdev, NULL);
+	dev_set_drvdata(&pdev->dev, NULL);
 	npu_mbox_deinit(npu_dev);
 	msm_bus_scale_unregister_client(npu_dev->bwctrl.bus_client);
 
@@ -2464,17 +2465,27 @@ static int npu_remove(struct platform_device *pdev)
  * Suspend/Resume
  * -------------------------------------------------------------------------
  */
-#if defined(CONFIG_PM)
-static int npu_suspend(struct platform_device *dev, pm_message_t state)
+static int npu_pm_suspend(struct device *dev)
 {
+	struct npu_device *npu_dev;
+
+	npu_dev = dev_get_drvdata(dev);
+	if (!npu_dev) {
+		NPU_ERR("invalid NPU dev\n");
+		return -EINVAL;
+	}
+
+	NPU_DBG("suspend npu\n");
+	npu_host_suspend(npu_dev);
+
 	return 0;
 }
 
-static int npu_resume(struct platform_device *dev)
+static int npu_pm_resume(struct device *dev)
 {
+	NPU_DBG("resume npu\n");
 	return 0;
 }
-#endif
 
 /* -------------------------------------------------------------------------
  * Module Entry Points
diff --git a/drivers/media/platform/msm/npu/npu_mgr.c b/drivers/media/platform/msm/npu/npu_mgr.c
index 7e2aff9..9941ce5 100644
--- a/drivers/media/platform/msm/npu/npu_mgr.c
+++ b/drivers/media/platform/msm/npu/npu_mgr.c
@@ -2837,3 +2837,10 @@ int32_t npu_host_get_perf_mode(struct npu_client *client, uint32_t network_hdl)
 
 	return param_val;
 }
+
+void npu_host_suspend(struct npu_device *npu_dev)
+{
+	struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
+
+	flush_delayed_work(&host_ctx->disable_fw_work);
+}
diff --git a/drivers/media/platform/msm/npu/npu_mgr.h b/drivers/media/platform/msm/npu/npu_mgr.h
index 0beb948..6a24c64 100644
--- a/drivers/media/platform/msm/npu/npu_mgr.h
+++ b/drivers/media/platform/msm/npu/npu_mgr.h
@@ -178,6 +178,7 @@ int npu_host_update_power(struct npu_device *npu_dev);
 int32_t npu_host_set_perf_mode(struct npu_client *client, uint32_t network_hdl,
 	uint32_t perf_mode);
 int32_t npu_host_get_perf_mode(struct npu_client *client, uint32_t network_hdl);
+void npu_host_suspend(struct npu_device *npu_dev);
 void npu_dump_debug_info(struct npu_device *npu_dev);
 void npu_dump_ipc_packet(struct npu_device *npu_dev, void *cmd_ptr);
 
diff --git a/drivers/misc/wigig_sensing.c b/drivers/misc/wigig_sensing.c
index fe366cf..bf8323e 100644
--- a/drivers/misc/wigig_sensing.c
+++ b/drivers/misc/wigig_sensing.c
@@ -2,7 +2,6 @@
 /*
  * Copyright (c) 2019, The Linux foundation. All rights reserved.
  */
-
 #include <linux/cdev.h>
 #include <linux/circ_buf.h>
 #include <linux/clk.h>
@@ -15,6 +14,7 @@
 #include <linux/io.h>
 #include <linux/ioctl.h>
 #include <linux/kernel.h>
+#include <linux/kfifo.h>
 #include <linux/list.h>
 #include <linux/module.h>
 #include <linux/of.h>
@@ -629,9 +629,29 @@ static int wigig_sensing_ioc_get_num_dropped_bursts(
 	return ctx->dropped_bursts;
 }
 
-static int wigig_sensing_ioc_get_event(struct wigig_sensing_ctx *ctx)
+static int wigig_sensing_ioc_get_num_avail_bursts(
+	struct wigig_sensing_ctx *ctx)
 {
-	return 0;
+	if (ctx->stm.burst_size)
+		return circ_cnt(&ctx->cir_data.b, ctx->cir_data.size_bytes) /
+			ctx->stm.burst_size;
+	else
+		return 0;
+}
+
+static int wigig_sensing_ioc_get_event(struct wigig_sensing_ctx *ctx,
+				       enum wigig_sensing_event *event)
+{
+	u32 copied;
+
+	if (!ctx->event_pending)
+		return -EINVAL;
+
+	if (kfifo_len(&ctx->events_fifo) == 1)
+		ctx->event_pending = false;
+
+	return kfifo_to_user(&ctx->events_fifo, event,
+			     sizeof(enum wigig_sensing_event), &copied);
 }
 
 static int wigig_sensing_open(struct inode *inode, struct file *filp)
@@ -759,7 +779,7 @@ static int wigig_sensing_release(struct inode *inode, struct file *filp)
 }
 
 static long wigig_sensing_ioctl(struct file *file, unsigned int cmd,
-				unsigned long arg)
+				__user unsigned long arg)
 {
 	int rc;
 	struct wigig_sensing_ctx *ctx = file->private_data;
@@ -812,7 +832,12 @@ static long wigig_sensing_ioctl(struct file *file, unsigned int cmd,
 		break;
 	case WIGIG_SENSING_IOCTL_GET_EVENT:
 		pr_info("Received WIGIG_SENSING_IOCTL_GET_EVENT command\n");
-		rc = wigig_sensing_ioc_get_event(ctx);
+		rc = wigig_sensing_ioc_get_event(ctx,
+			(enum wigig_sensing_event *)arg);
+		break;
+	case WIGIG_SENSING_IOCTL_GET_NUM_AVAIL_BURSTS:
+		pr_info("Received WIGIG_SENSING_IOCTL_GET_NUM_AVAIL_BURSTS command\n");
+		rc = wigig_sensing_ioc_get_num_avail_bursts(ctx);
 		break;
 	default:
 		rc = -EINVAL;
@@ -1124,6 +1149,22 @@ static int wigig_sensing_spi_init(struct wigig_sensing_ctx *ctx)
 	return rc;
 }
 
+static int wigig_sensing_send_event(struct wigig_sensing_ctx *ctx,
+				    enum wigig_sensing_event event)
+{
+	if (kfifo_is_full(&ctx->events_fifo)) {
+		pr_err("events fifo is full, unable to send event\n");
+		return -EFAULT;
+	}
+
+	kfifo_in(&ctx->events_fifo, &event, 1);
+	ctx->event_pending = true;
+
+	wake_up_interruptible(&ctx->cmd_wait_q);
+
+	return 0;
+}
+
 static irqreturn_t wigig_sensing_dri_isr_thread(int irq, void *cookie)
 {
 	struct wigig_sensing_ctx *ctx = cookie;
@@ -1211,6 +1252,9 @@ static irqreturn_t wigig_sensing_dri_isr_thread(int irq, void *cookie)
 		wigig_sensing_change_state(ctx, &ctx->stm,
 					   WIGIG_SENSING_STATE_READY_STOPPED);
 
+		/* Send asynchronous FW_READY event to application */
+		wigig_sensing_send_event(ctx, WIGIG_SENSING_EVENT_FW_READY);
+
 		spi_status.v &= ~INT_FW_READY;
 	}
 	if (spi_status.b.int_data_ready) {
@@ -1233,6 +1277,9 @@ static irqreturn_t wigig_sensing_dri_isr_thread(int irq, void *cookie)
 		    ctx->stm.state != WIGIG_SENSING_STATE_SYS_ASSERT)
 			pr_err("State change to WIGIG_SENSING_SYS_ASSERT failed\n");
 
+		/* Send asynchronous RESET event to application */
+		wigig_sensing_send_event(ctx, WIGIG_SENSING_EVENT_RESET);
+
 		ctx->stm.spi_malfunction = true;
 		spi_status.v &= ~INT_SYSASSERT;
 	}
@@ -1305,6 +1352,7 @@ static int wigig_sensing_probe(struct spi_device *spi)
 	init_waitqueue_head(&ctx->cmd_wait_q);
 	init_waitqueue_head(&ctx->data_wait_q);
 	ctx->stm.state = WIGIG_SENSING_STATE_INITIALIZED;
+	INIT_KFIFO(ctx->events_fifo);
 
 	/* Allocate memory for the CIRs */
 	/* Allocate a 2MB == 2^21 buffer for CIR data */
diff --git a/drivers/misc/wigig_sensing.h b/drivers/misc/wigig_sensing.h
index c4b2910..eaf2023 100644
--- a/drivers/misc/wigig_sensing.h
+++ b/drivers/misc/wigig_sensing.h
@@ -7,6 +7,7 @@
 #define __WIGIG_SENSING_H__
 #include <linux/cdev.h>
 #include <linux/circ_buf.h>
+#include <linux/kfifo.h>
 #include <linux/slab.h>
 #include <uapi/misc/wigig_sensing_uapi.h>
 
@@ -193,6 +194,7 @@ struct wigig_sensing_ctx {
 	struct cir_data cir_data;
 	u8 *temp_buffer;
 	bool event_pending;
+	DECLARE_KFIFO(events_fifo, enum wigig_sensing_event, 8);
 	u32 dropped_bursts;
 };
 
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
index 6f6ebfb..b215129 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
@@ -163,11 +163,11 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev,
 			 struct nlattr *tb[], struct nlattr *data[],
 			 struct netlink_ext_ack *extack)
 {
-	u32 data_format = RMNET_FLAGS_INGRESS_DEAGGREGATION;
 	struct net_device *real_dev;
 	int mode = RMNET_EPMODE_VND;
 	struct rmnet_endpoint *ep;
 	struct rmnet_port *port;
+	u32 data_format;
 	int err = 0;
 	u16 mux_id;
 
@@ -202,11 +202,10 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev,
 
 		flags = nla_data(data[IFLA_RMNET_FLAGS]);
 		data_format = flags->flags & flags->mask;
+		netdev_dbg(dev, "data format [0x%08X]\n", data_format);
+		port->data_format = data_format;
 	}
 
-	netdev_dbg(dev, "data format [0x%08X]\n", data_format);
-	port->data_format = data_format;
-
 	if (data[IFLA_RMNET_UL_AGG_PARAMS]) {
 		void *agg_params;
 		unsigned long irq_flags;
diff --git a/drivers/net/wireless/cnss2/main.h b/drivers/net/wireless/cnss2/main.h
index 431ea90..c3d764f 100644
--- a/drivers/net/wireless/cnss2/main.h
+++ b/drivers/net/wireless/cnss2/main.h
@@ -18,6 +18,7 @@
 #define MAX_NO_OF_MAC_ADDR		4
 #define QMI_WLFW_MAX_TIMESTAMP_LEN	32
 #define QMI_WLFW_MAX_NUM_MEM_SEG	32
+#define QMI_WLFW_MAX_BUILD_ID_LEN	128
 #define CNSS_RDDM_TIMEOUT_MS		20000
 #define RECOVERY_TIMEOUT		60000
 #define TIME_CLOCK_FREQ_HZ		19200000
@@ -325,6 +326,7 @@ struct cnss_plat_data {
 	struct wlfw_rf_board_info board_info;
 	struct wlfw_soc_info soc_info;
 	struct wlfw_fw_version_info fw_version_info;
+	char fw_build_id[QMI_WLFW_MAX_BUILD_ID_LEN + 1];
 	u32 otp_version;
 	u32 fw_mem_seg_len;
 	struct cnss_fw_mem fw_mem[QMI_WLFW_MAX_NUM_MEM_SEG];
diff --git a/drivers/net/wireless/cnss2/pci.c b/drivers/net/wireless/cnss2/pci.c
index 6f506d7..8cd6963 100644
--- a/drivers/net/wireless/cnss2/pci.c
+++ b/drivers/net/wireless/cnss2/pci.c
@@ -396,9 +396,11 @@ static int cnss_pci_reg_read(struct cnss_pci_data *pci_priv,
 {
 	int ret;
 
-	ret = cnss_pci_check_link_status(pci_priv);
-	if (ret)
-		return ret;
+	if (!in_interrupt() && !irqs_disabled()) {
+		ret = cnss_pci_check_link_status(pci_priv);
+		if (ret)
+			return ret;
+	}
 
 	if (pci_priv->pci_dev->device == QCA6174_DEVICE_ID ||
 	    offset < MAX_UNWINDOWED_ADDRESS) {
@@ -421,9 +423,11 @@ static int cnss_pci_reg_write(struct cnss_pci_data *pci_priv, u32 offset,
 {
 	int ret;
 
-	ret = cnss_pci_check_link_status(pci_priv);
-	if (ret)
-		return ret;
+	if (!in_interrupt() && !irqs_disabled()) {
+		ret = cnss_pci_check_link_status(pci_priv);
+		if (ret)
+			return ret;
+	}
 
 	if (pci_priv->pci_dev->device == QCA6174_DEVICE_ID ||
 	    offset < MAX_UNWINDOWED_ADDRESS) {
diff --git a/drivers/net/wireless/cnss2/qmi.c b/drivers/net/wireless/cnss2/qmi.c
index 77654ef..d8b6232 100644
--- a/drivers/net/wireless/cnss2/qmi.c
+++ b/drivers/net/wireless/cnss2/qmi.c
@@ -401,6 +401,9 @@ int cnss_wlfw_tgt_cap_send_sync(struct cnss_plat_data *plat_priv)
 			resp->fw_version_info.fw_build_timestamp,
 			QMI_WLFW_MAX_TIMESTAMP_LEN + 1);
 	}
+	if (resp->fw_build_id_valid)
+		strlcpy(plat_priv->fw_build_id, resp->fw_build_id,
+			QMI_WLFW_MAX_BUILD_ID_LEN + 1);
 	if (resp->voltage_mv_valid) {
 		plat_priv->cpr_info.voltage = resp->voltage_mv;
 		cnss_pr_dbg("Voltage for CPR: %dmV\n",
@@ -415,12 +418,13 @@ int cnss_wlfw_tgt_cap_send_sync(struct cnss_plat_data *plat_priv)
 	if (resp->otp_version_valid)
 		plat_priv->otp_version = resp->otp_version;
 
-	cnss_pr_dbg("Target capability: chip_id: 0x%x, chip_family: 0x%x, board_id: 0x%x, soc_id: 0x%x, fw_version: 0x%x, fw_build_timestamp: %s, otp_version: 0x%x\n",
+	cnss_pr_dbg("Target capability: chip_id: 0x%x, chip_family: 0x%x, board_id: 0x%x, soc_id: 0x%x, fw_version: 0x%x, fw_build_timestamp: %s, fw_build_id: %s, otp_version: 0x%x\n",
 		    plat_priv->chip_info.chip_id,
 		    plat_priv->chip_info.chip_family,
 		    plat_priv->board_info.board_id, plat_priv->soc_info.soc_id,
 		    plat_priv->fw_version_info.fw_version,
 		    plat_priv->fw_version_info.fw_build_timestamp,
+		    plat_priv->fw_build_id,
 		    plat_priv->otp_version);
 
 	kfree(req);
diff --git a/drivers/pci/controller/pci-msm.c b/drivers/pci/controller/pci-msm.c
index 12aba0c..e88b292 100644
--- a/drivers/pci/controller/pci-msm.c
+++ b/drivers/pci/controller/pci-msm.c
@@ -767,7 +767,6 @@ struct msm_pcie_dev_t {
 	void *ipc_log_dump;
 	bool use_19p2mhz_aux_clk;
 	bool use_pinctrl;
-	bool enable_l1ss_timeout;
 	struct pinctrl *pinctrl;
 	struct pinctrl_state *pins_default;
 	struct pinctrl_state *pins_sleep;
@@ -6430,6 +6429,9 @@ int msm_pcie_set_link_bandwidth(struct pci_dev *pci_dev, u16 target_link_speed,
 		return -EINVAL;
 
 	root_pci_dev = pci_find_pcie_root_port(pci_dev);
+	if (!root_pci_dev)
+		return -ENODEV;
+
 	pcie_dev = PCIE_BUS_PRIV_DATA(root_pci_dev->bus);
 
 	pcie_capability_read_word(root_pci_dev, PCI_EXP_LNKSTA, &link_status);
@@ -6896,8 +6898,6 @@ static void __msm_pcie_l1ss_timeout_enable(struct msm_pcie_dev_t *pcie_dev)
 
 	msm_pcie_write_mask(pcie_dev->parf +
 			PCIE20_PARF_DEBUG_INT_EN, 0, BIT(0));
-
-	pcie_dev->enable_l1ss_timeout = true;
 }
 
 /* Suspend the PCIe link */
@@ -6923,9 +6923,6 @@ static int msm_pcie_pm_suspend(struct pci_dev *dev,
 		return ret;
 	}
 
-	if (pcie_dev->enable_l1ss_timeout)
-		__msm_pcie_l1ss_timeout_disable(pcie_dev);
-
 	if (dev && !(options & MSM_PCIE_CONFIG_NO_CFG_RESTORE)
 		&& msm_pcie_confirm_linkup(pcie_dev, true, true,
 			pcie_dev->conf)) {
@@ -7077,9 +7074,6 @@ static int msm_pcie_pm_resume(struct pci_dev *dev,
 			pcie_dev->rc_idx);
 	}
 
-	if (pcie_dev->enable_l1ss_timeout)
-		__msm_pcie_l1ss_timeout_enable(pcie_dev);
-
 	PCIE_DBG(pcie_dev, "RC%d: exit\n", pcie_dev->rc_idx);
 
 	return ret;
@@ -7136,6 +7130,7 @@ static int msm_pcie_drv_resume(struct msm_pcie_dev_t *pcie_dev)
 	struct msm_pcie_drv_info *drv_info = pcie_dev->drv_info;
 	struct msm_pcie_drv_msg *drv_disable = &drv_info->drv_disable;
 	struct msm_pcie_clk_info_t *clk_info;
+	u32 current_link_speed;
 	int ret, i;
 
 	mutex_lock(&pcie_dev->recovery_lock);
@@ -7189,6 +7184,14 @@ static int msm_pcie_drv_resume(struct msm_pcie_dev_t *pcie_dev)
 		}
 	}
 
+	/* scale CX and rate change based on current GEN speed */
+	current_link_speed = readl_relaxed(pcie_dev->dm_core +
+					PCIE20_CAP_LINKCTRLSTATUS);
+	current_link_speed = ((current_link_speed >> 16) &
+				PCI_EXP_LNKSTA_CLS);
+
+	msm_pcie_scale_link_bandwidth(pcie_dev, current_link_speed);
+
 	/* always ungate clkreq */
 	msm_pcie_write_reg_field(pcie_dev->parf,
 				PCIE20_PARF_CLKREQ_OVERRIDE,
diff --git a/drivers/phy/qualcomm/phy-qcom-ufs-qmp-v3-660.h b/drivers/phy/qualcomm/phy-qcom-ufs-qmp-v3-660.h
index e7ff88b..a809246 100644
--- a/drivers/phy/qualcomm/phy-qcom-ufs-qmp-v3-660.h
+++ b/drivers/phy/qualcomm/phy-qcom-ufs-qmp-v3-660.h
@@ -152,7 +152,9 @@
 #define UFS_PHY_PHY_START			PHY_OFF(0x00)
 #define UFS_PHY_POWER_DOWN_CONTROL		PHY_OFF(0x04)
 #define UFS_PHY_TX_LARGE_AMP_DRV_LVL		PHY_OFF(0x34)
+#define UFS_PHY_TX_LARGE_AMP_POST_EMP_LVL	PHY_OFF(0x38)
 #define UFS_PHY_TX_SMALL_AMP_DRV_LVL		PHY_OFF(0x3C)
+#define UFS_PHY_TX_SMALL_AMP_POST_EMP_LVL	PHY_OFF(0x40)
 #define UFS_PHY_RX_MIN_STALL_NOCONFIG_TIME_CAP	PHY_OFF(0xCC)
 #define UFS_PHY_LINECFG_DISABLE			PHY_OFF(0x138)
 #define UFS_PHY_RX_SYM_RESYNC_CTRL		PHY_OFF(0x13C)
@@ -268,14 +270,16 @@ static struct ufs_qcom_phy_calibration phy_cal_table_rate_A_3_1_1[] = {
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_UCDR_SVS_SO_GAIN_HALF, 0x04),
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_UCDR_SVS_SO_GAIN_QUARTER, 0x04),
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_UCDR_SVS_SO_GAIN, 0x04),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_UCDR_SO_SATURATION_ENABLE, 0x4B),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_UCDR_SO_SATURATION_ENABLE, 0x5B),
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_INITVAL1, 0xFF),
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_INITVAL2, 0x00),
-	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_SIGDET_CTRL2, 0x6c),
-	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_TX_LARGE_AMP_DRV_LVL, 0x0A),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_SIGDET_CTRL2, 0x6D),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_TX_LARGE_AMP_DRV_LVL, 0x0F),
 	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_TX_SMALL_AMP_DRV_LVL, 0x02),
 	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_MIN_STALL_NOCONFIG_TIME_CAP, 0x28),
 	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_SYM_RESYNC_CTRL, 0x03),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_TX_LARGE_AMP_POST_EMP_LVL, 0x12),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_TX_SMALL_AMP_POST_EMP_LVL, 0x0F),
 	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_MIN_HIBERN8_TIME, 0x9A), /* 8 us */
 };
 
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
index 0bc52f3..68a88ca 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
@@ -533,13 +533,15 @@ static int ipa3_attrib_dump(struct ipa_rule_attrib *attrib,
 		pr_cont("frg ");
 
 	if ((attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_ETHER_II) ||
-		(attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_3)) {
+		(attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_3) ||
+		(attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_1Q)) {
 		pr_cont("src_mac_addr:%pM ", attrib->src_mac_addr);
 	}
 
 	if ((attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) ||
 		(attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3) ||
-		(attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_L2TP)) {
+		(attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_L2TP) ||
+		(attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_1Q)) {
 		pr_cont("dst_mac_addr:%pM ", attrib->dst_mac_addr);
 	}
 
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_mpm.c b/drivers/platform/msm/ipa/ipa_v3/ipa_mpm.c
index 79ca342..02895e7fc 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_mpm.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_mpm.c
@@ -12,6 +12,7 @@
 #include <linux/msm_gsi.h>
 #include <linux/delay.h>
 #include <linux/log2.h>
+#include <linux/gfp.h>
 #include "../ipa_common_i.h"
 #include "ipa_i.h"
 
@@ -54,7 +55,7 @@
 
 #define IPA_MPM_MAX_MHIP_CHAN 3
 
-#define IPA_MPM_NUM_RING_DESC 0x400
+#define IPA_MPM_NUM_RING_DESC 74
 #define IPA_MPM_RING_LEN (IPA_MPM_NUM_RING_DESC - 10)
 
 #define IPA_MPM_MHI_HOST_UL_CHANNEL 4
@@ -352,10 +353,10 @@ struct ipa_mpm_clk_cnt_type {
 struct producer_rings {
 	struct mhi_p_desc *tr_va;
 	struct mhi_p_desc *er_va;
-	void *tre_buff_va[IPA_MPM_RING_LEN];
+	void *tr_buff_va[IPA_MPM_RING_LEN];
 	dma_addr_t tr_pa;
 	dma_addr_t er_pa;
-	dma_addr_t tre_buff_iova[IPA_MPM_RING_LEN];
+	dma_addr_t tr_buff_c_iova[IPA_MPM_RING_LEN];
 	/*
 	 * The iova generated for AP CB,
 	 * used only for dma_map_single to flush the cache.
@@ -537,7 +538,7 @@ static dma_addr_t ipa_mpm_smmu_map(void *va_addr,
 
 	/* check cache coherent */
 	if (ipa_mpm_ctx->dev_info.is_cache_coherent)  {
-		IPA_MPM_DBG_LOW(" enable cache coherent\n");
+		IPA_MPM_DBG_LOW("enable cache coherent\n");
 		prot |= IOMMU_CACHE;
 	}
 
@@ -573,12 +574,19 @@ static dma_addr_t ipa_mpm_smmu_map(void *va_addr,
 		}
 
 		phys_addr = virt_to_phys((void *) va_addr);
+
 		IPA_SMMU_ROUND_TO_PAGE(carved_iova, phys_addr, sz,
 					iova_p, pa_p, size_p);
 
-		/* Flush the cache with dma_map_single for IPA AP CB */
+	/* Flush the cache with dma_map_single for IPA AP CB */
 		*ap_cb_iova = dma_map_single(ipa3_ctx->pdev, va_addr,
-						IPA_MPM_RING_TOTAL_SIZE, dir);
+					size_p, dir);
+
+		if (dma_mapping_error(ipa3_ctx->pdev, *ap_cb_iova)) {
+			IPA_MPM_ERR("dma_map_single failure for entry\n");
+			goto fail_dma_mapping;
+		}
+
 		ret = ipa3_iommu_map(ipa_smmu_domain, iova_p,
 					pa_p, size_p, prot);
 		if (ret) {
@@ -601,14 +609,25 @@ static dma_addr_t ipa_mpm_smmu_map(void *va_addr,
 			ipa_assert();
 		}
 
-		iova = iova_p;
 		cb->next_addr = iova_p + size_p;
+		iova = iova_p;
 	} else {
 		iova = dma_map_single(ipa3_ctx->pdev, va_addr,
 					IPA_MPM_RING_TOTAL_SIZE, dir);
+
+		if (dma_mapping_error(ipa3_ctx->pdev, iova)) {
+			IPA_MPM_ERR("dma_map_single failure for entry\n");
+			goto fail_dma_mapping;
+		}
+
 		*ap_cb_iova = iova;
 	}
 	return iova;
+
+fail_dma_mapping:
+	iova = 0;
+	ipa_assert();
+	return iova;
 }
 
 /**
@@ -813,18 +832,14 @@ static int ipa_mpm_connect_mhip_gsi_pipe(enum ipa_client_type mhip_client,
 {
 	int ipa_ep_idx;
 	int res;
-	struct mhi_p_desc *ev_ring;
-	struct mhi_p_desc *tr_ring;
-	int tr_ring_sz, ev_ring_sz;
-	dma_addr_t ev_ring_iova, tr_ring_iova;
-	dma_addr_t ap_cb_iova;
-	dma_addr_t ap_cb_er_iova;
+	struct mhi_p_desc *er_ring_va, *tr_ring_va;
+	void *buff_va;
+	dma_addr_t er_carved_iova, tr_carved_iova;
+	dma_addr_t ap_cb_tr_iova, ap_cb_er_iova, ap_cb_buff_iova;
 	struct ipa_request_gsi_channel_params gsi_params;
 	int dir;
-	int i;
-	void *buff;
+	int i, k;
 	int result;
-	int k;
 	struct ipa3_ep_context *ep;
 
 	if (mhip_client == IPA_CLIENT_MAX)
@@ -849,92 +864,94 @@ static int ipa_mpm_connect_mhip_gsi_pipe(enum ipa_client_type mhip_client,
 
 	IPA_MPM_FUNC_ENTRY();
 
-	ev_ring_sz = IPA_MPM_RING_TOTAL_SIZE;
-	ev_ring = kzalloc(ev_ring_sz, GFP_KERNEL);
-	if (!ev_ring)
+	if (IPA_MPM_RING_TOTAL_SIZE > PAGE_SIZE) {
+		IPA_MPM_ERR("Ring Size / allocation mismatch\n");
+		ipa_assert();
+	}
+
+	/* Only ring need alignment, separate from buffer */
+	er_ring_va = (struct mhi_p_desc *) get_zeroed_page(GFP_KERNEL);
+
+	if (!er_ring_va)
 		goto fail_evt_alloc;
 
-	tr_ring_sz = IPA_MPM_RING_TOTAL_SIZE;
-	tr_ring = kzalloc(tr_ring_sz, GFP_KERNEL);
-	if (!tr_ring)
+	tr_ring_va = (struct mhi_p_desc *) get_zeroed_page(GFP_KERNEL);
+
+	if (!tr_ring_va)
 		goto fail_tr_alloc;
 
-	tr_ring[0].re_type = MHIP_RE_NOP;
+	tr_ring_va[0].re_type = MHIP_RE_NOP;
 
 	dir = IPA_CLIENT_IS_PROD(mhip_client) ?
 		DMA_TO_HIPA : DMA_FROM_HIPA;
 
 	/* allocate transfer ring elements */
 	for (i = 1, k = 1; i < IPA_MPM_RING_LEN; i++, k++) {
-		buff = kzalloc(TRE_BUFF_SIZE, GFP_KERNEL);
-
-		if (!buff)
+		buff_va = kzalloc(TRE_BUFF_SIZE, GFP_KERNEL);
+		if (!buff_va)
 			goto fail_buff_alloc;
 
-		tr_ring[i].buffer_ptr =
-			ipa_mpm_smmu_map(buff, TRE_BUFF_SIZE, dir,
-				&ap_cb_iova);
-		if (!tr_ring[i].buffer_ptr)
+		tr_ring_va[i].buffer_ptr =
+			ipa_mpm_smmu_map(buff_va, TRE_BUFF_SIZE, dir,
+					&ap_cb_buff_iova);
+
+		if (!tr_ring_va[i].buffer_ptr)
 			goto fail_smmu_map_ring;
 
+		tr_ring_va[i].buff_len = TRE_BUFF_SIZE;
+		tr_ring_va[i].chain = 0;
+		tr_ring_va[i].ieob = 0;
+		tr_ring_va[i].ieot = 0;
+		tr_ring_va[i].bei = 0;
+		tr_ring_va[i].sct = 0;
+		tr_ring_va[i].re_type = MHIP_RE_XFER;
+
 		if (IPA_CLIENT_IS_PROD(mhip_client)) {
-			ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.tre_buff_va[k] =
-							buff;
-			ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.tre_buff_iova[k] =
-							tr_ring[i].buffer_ptr;
-		} else {
-			ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.tre_buff_va[k] =
-							buff;
-			ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.tre_buff_iova[k] =
-							tr_ring[i].buffer_ptr;
-		}
-
-
-		tr_ring[i].buff_len = TRE_BUFF_SIZE;
-		tr_ring[i].chain = 0;
-		tr_ring[i].ieob = 0;
-		tr_ring[i].ieot = 0;
-		tr_ring[i].bei = 0;
-		tr_ring[i].sct = 0;
-		tr_ring[i].re_type = MHIP_RE_XFER;
-
-		if (IPA_CLIENT_IS_PROD(mhip_client))
+			ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.tr_buff_va[k] =
+						buff_va;
+			ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.tr_buff_c_iova[k]
+						= tr_ring_va[i].buffer_ptr;
 			ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.ap_iova_buff[k] =
-				ap_cb_iova;
-		else
+						ap_cb_buff_iova;
+		} else {
+			ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.tr_buff_va[k] =
+						buff_va;
+			ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.tr_buff_c_iova[k]
+						= tr_ring_va[i].buffer_ptr;
 			ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.ap_iova_buff[k] =
-				ap_cb_iova;
+						ap_cb_buff_iova;
+		}
 	}
 
-	tr_ring_iova = ipa_mpm_smmu_map(tr_ring, IPA_MPM_PAGE_SIZE, dir,
-		&ap_cb_iova);
-	if (!tr_ring_iova)
+	tr_carved_iova = ipa_mpm_smmu_map(tr_ring_va, PAGE_SIZE, dir,
+		&ap_cb_tr_iova);
+	if (!tr_carved_iova)
 		goto fail_smmu_map_ring;
 
-	ev_ring_iova = ipa_mpm_smmu_map(ev_ring, IPA_MPM_PAGE_SIZE, dir,
+	er_carved_iova = ipa_mpm_smmu_map(er_ring_va, PAGE_SIZE, dir,
 		&ap_cb_er_iova);
-	if (!ev_ring_iova)
+	if (!er_carved_iova)
 		goto fail_smmu_map_ring;
 
 	/* Store Producer channel rings */
 	if (IPA_CLIENT_IS_PROD(mhip_client)) {
 		/* Device UL */
-		ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.er_va = ev_ring;
-		ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.tr_va = tr_ring;
-		ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.er_pa = ev_ring_iova;
-		ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.tr_pa = tr_ring_iova;
+		ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.er_va = er_ring_va;
+		ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.tr_va = tr_ring_va;
+		ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.er_pa = er_carved_iova;
+		ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.tr_pa = tr_carved_iova;
 		ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.ap_iova_tr =
-			ap_cb_iova;
+			ap_cb_tr_iova;
 		ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.ap_iova_er =
 			ap_cb_er_iova;
 	} else {
 		/* Host UL */
-		ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.er_va = ev_ring;
-		ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.tr_va = tr_ring;
-		ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.er_pa = ev_ring_iova;
-		ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.tr_pa = tr_ring_iova;
+		ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.er_va = er_ring_va;
+		ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.tr_va = tr_ring_va;
+		ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.er_pa = er_carved_iova;
+		ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.tr_pa = tr_carved_iova;
 		ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.ap_iova_tr =
-			ap_cb_iova;
+			ap_cb_tr_iova;
 		ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.ap_iova_er =
 			ap_cb_er_iova;
 	}
@@ -1211,35 +1228,35 @@ static void ipa_mpm_clean_mhip_chan(int mhi_idx,
 		if (IPA_CLIENT_IS_PROD(mhip_client)) {
 			ipa_mpm_smmu_unmap(
 			(dma_addr_t)
-			ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.tre_buff_iova[i],
+			ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.tr_buff_c_iova[i],
 			TRE_BUFF_SIZE, dir,
 			ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.ap_iova_buff[i]);
-			ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.tre_buff_iova[i]
+			ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.tr_buff_c_iova[i]
 								= 0;
 			kfree(
-			ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.tre_buff_va[i]);
-			ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.tre_buff_va[i]
+			ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.tr_buff_va[i]);
+			ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.tr_buff_va[i]
 								= NULL;
 			ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.ap_iova_buff[i]
 								= 0;
-			ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.tre_buff_iova[i]
+			ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.tr_buff_c_iova[i]
 								= 0;
 		} else {
 			ipa_mpm_smmu_unmap(
 			(dma_addr_t)
-			ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.tre_buff_iova[i],
+			ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.tr_buff_c_iova[i],
 			TRE_BUFF_SIZE, dir,
 			ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.ap_iova_buff[i]
 			);
-			ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.tre_buff_iova[i]
+			ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.tr_buff_c_iova[i]
 								= 0;
 			kfree(
-			ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.tre_buff_va[i]);
-			ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.tre_buff_va[i]
+			ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.tr_buff_va[i]);
+			ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.tr_buff_va[i]
 								= NULL;
 			ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.ap_iova_buff[i]
 								= 0;
-			ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.tre_buff_iova[i]
+			ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.tr_buff_c_iova[i]
 								= 0;
 		}
 	}
@@ -1256,15 +1273,20 @@ static void ipa_mpm_clean_mhip_chan(int mhi_idx,
 			IPA_MPM_PAGE_SIZE, dir,
 			ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.ap_iova_tr);
 
-		kfree(ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.er_va);
-		kfree(ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.tr_va);
+		if (ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.er_va) {
+			free_page((unsigned long)
+				ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.er_va);
+			ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.er_va = NULL;
+		}
 
-		ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.er_va = NULL;
-		ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.tr_va = NULL;
-		ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.ap_iova_tr = 0;
+		if (ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.tr_va) {
+			free_page((unsigned long)
+				ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.tr_va);
+			ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.tr_va = NULL;
+		}
+
 		ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.ap_iova_er = 0;
-
-
+		ipa_mpm_ctx->md[mhi_idx].dl_prod_ring.ap_iova_tr = 0;
 	} else {
 		ipa_mpm_smmu_unmap(
 			ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.tr_pa,
@@ -1278,11 +1300,18 @@ static void ipa_mpm_clean_mhip_chan(int mhi_idx,
 		ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.tr_pa = 0;
 		ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.er_pa = 0;
 
-		kfree(ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.er_va);
-		kfree(ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.tr_va);
+		if (ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.er_va) {
+			free_page((unsigned long)
+				ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.er_va);
+			ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.er_va = NULL;
+		}
 
-		ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.er_va = NULL;
-		ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.tr_va = NULL;
+		if (ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.tr_va) {
+			free_page((unsigned long)
+				ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.tr_va);
+			ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.tr_va = NULL;
+		}
+
 		ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.ap_iova_er = 0;
 		ipa_mpm_ctx->md[mhi_idx].ul_prod_ring.ap_iova_tr = 0;
 	}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c
index e3c984d..9e2ec20 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c
@@ -13,6 +13,11 @@
 #include "ipahal_i.h"
 #include "../../ipa_common_i.h"
 
+#define IPA_MAC_FLT_BITS (IPA_FLT_MAC_DST_ADDR_ETHER_II | \
+		IPA_FLT_MAC_SRC_ADDR_ETHER_II | IPA_FLT_MAC_DST_ADDR_802_3 | \
+		IPA_FLT_MAC_SRC_ADDR_802_3 | IPA_FLT_MAC_DST_ADDR_802_1Q | \
+		IPA_FLT_MAC_SRC_ADDR_802_1Q)
+
 /*
  * struct ipahal_fltrt_obj - Flt/Rt H/W information for specific IPA version
  * @support_hash: Is hashable tables supported
@@ -847,6 +852,112 @@ static void ipa_fltrt_generate_mac_addr_hw_rule(u8 **extra, u8 **rest,
 		*rest = ipa_write_8(mac_addr[i], *rest);
 }
 
+static inline void ipa_fltrt_get_mac_data(const struct ipa_rule_attrib *attrib,
+	uint32_t attrib_mask, u8 *offset, const uint8_t **mac_addr,
+	const uint8_t **mac_addr_mask)
+{
+	if (attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) {
+		*offset = -14;
+		*mac_addr = attrib->dst_mac_addr;
+		*mac_addr_mask = attrib->dst_mac_addr_mask;
+		return;
+	}
+
+	if (attrib_mask & IPA_FLT_MAC_SRC_ADDR_ETHER_II) {
+		*offset = -8;
+		*mac_addr = attrib->src_mac_addr;
+		*mac_addr_mask = attrib->src_mac_addr_mask;
+		return;
+	}
+
+	if (attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3) {
+		*offset = -22;
+		*mac_addr = attrib->dst_mac_addr;
+		*mac_addr_mask = attrib->dst_mac_addr_mask;
+		return;
+	}
+
+	if (attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_3) {
+		*offset = -16;
+		*mac_addr = attrib->src_mac_addr;
+		*mac_addr_mask = attrib->src_mac_addr_mask;
+		return;
+	}
+
+	if (attrib_mask & IPA_FLT_MAC_DST_ADDR_802_1Q) {
+		*offset = -18;
+		*mac_addr = attrib->dst_mac_addr;
+		*mac_addr_mask = attrib->dst_mac_addr_mask;
+		return;
+	}
+
+	if (attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_1Q) {
+		*offset = -10;
+		*mac_addr = attrib->src_mac_addr;
+		*mac_addr_mask = attrib->src_mac_addr_mask;
+		return;
+	}
+}
+
+static int ipa_fltrt_generate_mac_hw_rule_bdy(u16 *en_rule,
+	const struct ipa_rule_attrib *attrib,
+	u8 *ofst_meq128, u8 **extra, u8 **rest)
+{
+	u8 offset = 0;
+	const uint8_t *mac_addr = NULL;
+	const uint8_t *mac_addr_mask = NULL;
+	int i;
+	uint32_t attrib_mask;
+
+	for (i = 0; i < hweight_long(IPA_MAC_FLT_BITS); i++) {
+		switch (i) {
+		case 0:
+			attrib_mask = IPA_FLT_MAC_DST_ADDR_ETHER_II;
+			break;
+		case 1:
+			attrib_mask = IPA_FLT_MAC_SRC_ADDR_ETHER_II;
+			break;
+		case 2:
+			attrib_mask = IPA_FLT_MAC_DST_ADDR_802_3;
+			break;
+		case 3:
+			attrib_mask = IPA_FLT_MAC_SRC_ADDR_802_3;
+			break;
+		case 4:
+			attrib_mask = IPA_FLT_MAC_DST_ADDR_802_1Q;
+			break;
+		case 5:
+			attrib_mask = IPA_FLT_MAC_SRC_ADDR_802_1Q;
+			break;
+		default:
+			return -EPERM;
+		}
+
+		attrib_mask &= attrib->attrib_mask;
+		if (!attrib_mask)
+			continue;
+
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, *ofst_meq128)) {
+			IPAHAL_ERR("ran out of meq128 eq\n");
+			return -EPERM;
+		}
+
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq128[*ofst_meq128]);
+
+		ipa_fltrt_get_mac_data(attrib, attrib_mask, &offset,
+			&mac_addr, &mac_addr_mask);
+
+		ipa_fltrt_generate_mac_addr_hw_rule(extra, rest, offset,
+			mac_addr_mask,
+			mac_addr);
+
+		(*ofst_meq128)++;
+	}
+
+	return 0;
+}
+
 static inline int ipa_fltrt_generate_vlan_hw_rule_bdy(u16 *en_rule,
 	const struct ipa_rule_attrib *attrib,
 	u8 *ofst_meq32, u8 **extra, u8 **rest)
@@ -909,80 +1020,10 @@ static int ipa_fltrt_generate_hw_rule_bdy_ip4(u16 *en_rule,
 		extra = ipa_write_8(attrib->u.v4.protocol, extra);
 	}
 
-	if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) {
-		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
-			IPAHAL_ERR("ran out of meq128 eq\n");
+	if (attrib->attrib_mask & IPA_MAC_FLT_BITS) {
+		if (ipa_fltrt_generate_mac_hw_rule_bdy(en_rule, attrib,
+			&ofst_meq128, &extra, &rest))
 			goto err;
-		}
-		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
-			ipa3_0_ofst_meq128[ofst_meq128]);
-
-		/* -14 => offset of dst mac addr in Ethernet II hdr */
-		ipa_fltrt_generate_mac_addr_hw_rule(
-			&extra,
-			&rest,
-			-14,
-			attrib->dst_mac_addr_mask,
-			attrib->dst_mac_addr);
-
-		ofst_meq128++;
-	}
-
-	if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_ETHER_II) {
-		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
-			IPAHAL_ERR("ran out of meq128 eq\n");
-			goto err;
-		}
-		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
-			ipa3_0_ofst_meq128[ofst_meq128]);
-
-		/* -8 => offset of src mac addr in Ethernet II hdr */
-		ipa_fltrt_generate_mac_addr_hw_rule(
-			&extra,
-			&rest,
-			-8,
-			attrib->src_mac_addr_mask,
-			attrib->src_mac_addr);
-
-		ofst_meq128++;
-	}
-
-	if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3) {
-		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
-			IPAHAL_ERR("ran out of meq128 eq\n");
-			goto err;
-		}
-		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
-			ipa3_0_ofst_meq128[ofst_meq128]);
-
-		/* -22 => offset of dst mac addr in 802.3 hdr */
-		ipa_fltrt_generate_mac_addr_hw_rule(
-			&extra,
-			&rest,
-			-22,
-			attrib->dst_mac_addr_mask,
-			attrib->dst_mac_addr);
-
-		ofst_meq128++;
-	}
-
-	if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_3) {
-		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
-			IPAHAL_ERR("ran out of meq128 eq\n");
-			goto err;
-		}
-		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
-			ipa3_0_ofst_meq128[ofst_meq128]);
-
-		/* -16 => offset of src mac addr in 802.3 hdr */
-		ipa_fltrt_generate_mac_addr_hw_rule(
-			&extra,
-			&rest,
-			-16,
-			attrib->src_mac_addr_mask,
-			attrib->src_mac_addr);
-
-		ofst_meq128++;
 	}
 
 	if (attrib->attrib_mask & IPA_FLT_TOS_MASKED) {
@@ -1359,80 +1400,10 @@ static int ipa_fltrt_generate_hw_rule_bdy_ip6(u16 *en_rule,
 		ofst_meq128++;
 	}
 
-	if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) {
-		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
-			IPAHAL_ERR("ran out of meq128 eq\n");
+	if (attrib->attrib_mask & IPA_MAC_FLT_BITS) {
+		if (ipa_fltrt_generate_mac_hw_rule_bdy(en_rule, attrib,
+			&ofst_meq128, &extra, &rest))
 			goto err;
-		}
-		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
-			ipa3_0_ofst_meq128[ofst_meq128]);
-
-		/* -14 => offset of dst mac addr in Ethernet II hdr */
-		ipa_fltrt_generate_mac_addr_hw_rule(
-			&extra,
-			&rest,
-			-14,
-			attrib->dst_mac_addr_mask,
-			attrib->dst_mac_addr);
-
-		ofst_meq128++;
-	}
-
-	if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_ETHER_II) {
-		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
-			IPAHAL_ERR("ran out of meq128 eq\n");
-			goto err;
-		}
-		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
-			ipa3_0_ofst_meq128[ofst_meq128]);
-
-		/* -8 => offset of src mac addr in Ethernet II hdr */
-		ipa_fltrt_generate_mac_addr_hw_rule(
-			&extra,
-			&rest,
-			-8,
-			attrib->src_mac_addr_mask,
-			attrib->src_mac_addr);
-
-		ofst_meq128++;
-	}
-
-	if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3) {
-		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
-			IPAHAL_ERR("ran out of meq128 eq\n");
-			goto err;
-		}
-		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
-			ipa3_0_ofst_meq128[ofst_meq128]);
-
-		/* -22 => offset of dst mac addr in 802.3 hdr */
-		ipa_fltrt_generate_mac_addr_hw_rule(
-			&extra,
-			&rest,
-			-22,
-			attrib->dst_mac_addr_mask,
-			attrib->dst_mac_addr);
-
-		ofst_meq128++;
-	}
-
-	if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_3) {
-		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
-			IPAHAL_ERR("ran out of meq128 eq\n");
-			goto err;
-		}
-		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
-			ipa3_0_ofst_meq128[ofst_meq128]);
-
-		/* -16 => offset of src mac addr in 802.3 hdr */
-		ipa_fltrt_generate_mac_addr_hw_rule(
-			&extra,
-			&rest,
-			-16,
-			attrib->src_mac_addr_mask,
-			attrib->src_mac_addr);
-
-		ofst_meq128++;
 	}
 
 	if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE) {
@@ -2079,6 +2050,65 @@ static void ipa_flt_generate_mac_addr_eq(struct ipa_ipfltri_rule_eq *eq_atrb,
 			mac_addr[i];
 }
 
+static int ipa_flt_generate_mac_eq(
+	const struct ipa_rule_attrib *attrib, u16 *en_rule, u8 *ofst_meq128,
+	struct ipa_ipfltri_rule_eq *eq_atrb)
+{
+	u8 offset = 0;
+	const uint8_t *mac_addr = NULL;
+	const uint8_t *mac_addr_mask = NULL;
+	int i;
+	uint32_t attrib_mask;
+
+	for (i = 0; i < hweight_long(IPA_MAC_FLT_BITS); i++) {
+		switch (i) {
+		case 0:
+			attrib_mask = IPA_FLT_MAC_DST_ADDR_ETHER_II;
+			break;
+		case 1:
+			attrib_mask = IPA_FLT_MAC_SRC_ADDR_ETHER_II;
+			break;
+		case 2:
+			attrib_mask = IPA_FLT_MAC_DST_ADDR_802_3;
+			break;
+		case 3:
+			attrib_mask = IPA_FLT_MAC_SRC_ADDR_802_3;
+			break;
+		case 4:
+			attrib_mask = IPA_FLT_MAC_DST_ADDR_802_1Q;
+			break;
+		case 5:
+			attrib_mask = IPA_FLT_MAC_SRC_ADDR_802_1Q;
+			break;
+		default:
+			return -EPERM;
+		}
+
+		attrib_mask &= attrib->attrib_mask;
+		if (!attrib_mask)
+			continue;
+
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, *ofst_meq128)) {
+			IPAHAL_ERR("ran out of meq128 eq\n");
+			return -EPERM;
+		}
+
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq128[*ofst_meq128]);
+
+		ipa_fltrt_get_mac_data(attrib, attrib_mask, &offset,
+			&mac_addr, &mac_addr_mask);
+
+		ipa_flt_generate_mac_addr_eq(eq_atrb, offset,
+			mac_addr_mask, mac_addr,
+			*ofst_meq128);
+
+		(*ofst_meq128)++;
+	}
+
+	return 0;
+}
+
 static inline int ipa_flt_generat_vlan_eq(
 	const struct ipa_rule_attrib *attrib, u16 *en_rule, u8 *ofst_meq32,
 	struct ipa_ipfltri_rule_eq *eq_atrb)
@@ -2147,68 +2177,10 @@ static int ipa_flt_generate_eq_ip4(enum ipa_ip_type ip,
 		eq_atrb->protocol_eq = attrib->u.v4.protocol;
 	}
 
-	if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) {
-		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
-			IPAHAL_ERR("ran out of meq128 eq\n");
+	if (attrib->attrib_mask & IPA_MAC_FLT_BITS) {
+		if (ipa_flt_generate_mac_eq(attrib, en_rule,
+			&ofst_meq128, eq_atrb))
 			return -EPERM;
-		}
-		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
-			ipa3_0_ofst_meq128[ofst_meq128]);
-
-		/* -14 => offset of dst mac addr in Ethernet II hdr */
-		ipa_flt_generate_mac_addr_eq(eq_atrb, -14,
-			attrib->dst_mac_addr_mask, attrib->dst_mac_addr,
-			ofst_meq128);
-
-		ofst_meq128++;
-	}
-
-	if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_ETHER_II) {
-		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
-			IPAHAL_ERR("ran out of meq128 eq\n");
-			return -EPERM;
-		}
-		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
-			ipa3_0_ofst_meq128[ofst_meq128]);
-
-		/* -8 => offset of src mac addr in Ethernet II hdr */
-		ipa_flt_generate_mac_addr_eq(eq_atrb, -8,
-			attrib->src_mac_addr_mask, attrib->src_mac_addr,
-			ofst_meq128);
-
-		ofst_meq128++;
-	}
-
-	if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3) {
-		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
-			IPAHAL_ERR("ran out of meq128 eq\n");
-			return -EPERM;
-		}
-		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
-			ipa3_0_ofst_meq128[ofst_meq128]);
-
-		/* -22 => offset of dst mac addr in 802.3 hdr */
-		ipa_flt_generate_mac_addr_eq(eq_atrb, -22,
-			attrib->dst_mac_addr_mask, attrib->dst_mac_addr,
-			ofst_meq128);
-
-		ofst_meq128++;
-	}
-
-	if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_3) {
-		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
-			IPAHAL_ERR("ran out of meq128 eq\n");
-			return -EPERM;
-		}
-		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
-			ipa3_0_ofst_meq128[ofst_meq128]);
-
-		/* -16 => offset of src mac addr in 802.3 hdr */
-		ipa_flt_generate_mac_addr_eq(eq_atrb, -16,
-			attrib->src_mac_addr_mask, attrib->src_mac_addr,
-			ofst_meq128);
-
-		ofst_meq128++;
 	}
 
 	if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_L2TP) {
@@ -2625,68 +2597,10 @@ static int ipa_flt_generate_eq_ip6(enum ipa_ip_type ip,
 		ofst_meq128++;
 	}
 
-	if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) {
-		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
-			IPAHAL_ERR_RL("ran out of meq128 eq\n");
+	if (attrib->attrib_mask & IPA_MAC_FLT_BITS) {
+		if (ipa_flt_generate_mac_eq(attrib, en_rule,
+			&ofst_meq128, eq_atrb))
 			return -EPERM;
-		}
-		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
-			ipa3_0_ofst_meq128[ofst_meq128]);
-
-		/* -14 => offset of dst mac addr in Ethernet II hdr */
-		ipa_flt_generate_mac_addr_eq(eq_atrb, -14,
-			attrib->dst_mac_addr_mask, attrib->dst_mac_addr,
-			ofst_meq128);
-
-		ofst_meq128++;
-	}
-
-	if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_ETHER_II) {
-		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
-			IPAHAL_ERR_RL("ran out of meq128 eq\n");
-			return -EPERM;
-		}
-		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
-			ipa3_0_ofst_meq128[ofst_meq128]);
-
-		/* -8 => offset of src mac addr in Ethernet II hdr */
-		ipa_flt_generate_mac_addr_eq(eq_atrb, -8,
-			attrib->src_mac_addr_mask, attrib->src_mac_addr,
-			ofst_meq128);
-
-		ofst_meq128++;
-	}
-
-	if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3) {
-		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
-			IPAHAL_ERR_RL("ran out of meq128 eq\n");
-			return -EPERM;
-		}
-		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
-			ipa3_0_ofst_meq128[ofst_meq128]);
-
-		/* -22 => offset of dst mac addr in 802.3 hdr */
-		ipa_flt_generate_mac_addr_eq(eq_atrb, -22,
-			attrib->dst_mac_addr_mask, attrib->dst_mac_addr,
-			ofst_meq128);
-
-		ofst_meq128++;
-	}
-
-	if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_3) {
-		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
-			IPAHAL_ERR_RL("ran out of meq128 eq\n");
-			return -EPERM;
-		}
-		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
-			ipa3_0_ofst_meq128[ofst_meq128]);
-
-		/* -16 => offset of src mac addr in 802.3 hdr */
-		ipa_flt_generate_mac_addr_eq(eq_atrb, -16,
-			attrib->src_mac_addr_mask, attrib->src_mac_addr,
-			ofst_meq128);
-
-		ofst_meq128++;
 	}
 
 	if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_L2TP) {
diff --git a/drivers/power/supply/qcom/qpnp-fg-gen4.c b/drivers/power/supply/qcom/qpnp-fg-gen4.c
index c619b9a..daee40e 100644
--- a/drivers/power/supply/qcom/qpnp-fg-gen4.c
+++ b/drivers/power/supply/qcom/qpnp-fg-gen4.c
@@ -224,6 +224,7 @@ struct fg_dt_props {
 	int	delta_soc_thr;
 	int	vbatt_scale_thr_mv;
 	int	scale_timer_ms;
+	int	force_calib_level;
 	int	esr_timer_chg_fast[NUM_ESR_TIMERS];
 	int	esr_timer_chg_slow[NUM_ESR_TIMERS];
 	int	esr_timer_dischg_fast[NUM_ESR_TIMERS];
@@ -1144,6 +1145,9 @@ static int fg_gen4_set_calibrate_level(struct fg_gen4_chip *chip, int val)
 	if (!chip->pbs_dev)
 		return -ENODEV;
 
+	if (is_debug_batt_id(fg))
+		return 0;
+
 	if (val < 0 || val > 0x83) {
 		pr_err("Incorrect calibration level %d\n", val);
 		return -EINVAL;
@@ -1152,6 +1156,9 @@ static int fg_gen4_set_calibrate_level(struct fg_gen4_chip *chip, int val)
 	if (val == chip->calib_level)
 		return 0;
 
+	if (chip->dt.force_calib_level != -EINVAL)
+		val = chip->dt.force_calib_level;
+
 	buf = (u8)val;
 	rc = fg_write(fg, SDAM1_MEM_124_REG, &buf, 1);
 	if (rc < 0) {
@@ -6068,6 +6075,10 @@ static int fg_gen4_parse_dt(struct fg_gen4_chip *chip)
 					&chip->dt.scale_timer_ms);
 	}
 
+	chip->dt.force_calib_level = -EINVAL;
+	of_property_read_u32(node, "qcom,force-calib-level",
+					&chip->dt.force_calib_level);
+
 	rc = fg_parse_ki_coefficients(fg);
 	if (rc < 0)
 		pr_err("Error in parsing Ki coefficients, rc=%d\n", rc);
diff --git a/drivers/power/supply/qcom/qpnp-smb5.c b/drivers/power/supply/qcom/qpnp-smb5.c
index 684248a..4d92ad8 100644
--- a/drivers/power/supply/qcom/qpnp-smb5.c
+++ b/drivers/power/supply/qcom/qpnp-smb5.c
@@ -535,6 +535,9 @@ static int smb5_parse_dt_misc(struct smb5 *chip, struct device_node *node)
 	chg->suspend_input_on_debug_batt = of_property_read_bool(node,
 					"qcom,suspend-input-on-debug-batt");
 
+	chg->fake_chg_status_on_debug_batt = of_property_read_bool(node,
+					"qcom,fake-chg-status-on-debug-batt");
+
 	rc = of_property_read_u32(node, "qcom,otg-deglitch-time-ms",
 					&chg->otg_delay_ms);
 	if (rc < 0)
@@ -1613,6 +1616,7 @@ static enum power_supply_property smb5_batt_props[] = {
 	POWER_SUPPLY_PROP_FCC_STEPPER_ENABLE,
 };
 
+#define DEBUG_ACCESSORY_TEMP_DECIDEGC	250
 static int smb5_batt_get_prop(struct power_supply *psy,
 		enum power_supply_property psp,
 		union power_supply_propval *val)
@@ -1690,7 +1694,11 @@ static int smb5_batt_get_prop(struct power_supply *psy,
 		rc = smblib_get_prop_batt_iterm(chg, val);
 		break;
 	case POWER_SUPPLY_PROP_TEMP:
-		rc = smblib_get_prop_from_bms(chg, POWER_SUPPLY_PROP_TEMP, val);
+		if (chg->typec_mode == POWER_SUPPLY_TYPEC_SINK_DEBUG_ACCESSORY)
+			val->intval = DEBUG_ACCESSORY_TEMP_DECIDEGC;
+		else
+			rc = smblib_get_prop_from_bms(chg,
+						POWER_SUPPLY_PROP_TEMP, val);
 		break;
 	case POWER_SUPPLY_PROP_TECHNOLOGY:
 		val->intval = POWER_SUPPLY_TECHNOLOGY_LION;
diff --git a/drivers/power/supply/qcom/smb1390-charger-psy.c b/drivers/power/supply/qcom/smb1390-charger-psy.c
index 7e0a399..7f48d20 100644
--- a/drivers/power/supply/qcom/smb1390-charger-psy.c
+++ b/drivers/power/supply/qcom/smb1390-charger-psy.c
@@ -865,6 +865,9 @@ static int smb1390_disable_vote_cb(struct votable *votable, void *data,
 		return rc;
 	}
 
+	smb1390_dbg(chip, PR_INFO, "client: %s, master: %s\n",
+			client, (disable ? "disabled" : "enabled"));
+
 	/* charging may have been disabled by ILIM; send uevent */
 	if (chip->cp_master_psy && (disable != chip->disabled))
 		power_supply_changed(chip->cp_master_psy);
@@ -887,6 +890,9 @@ static int smb1390_slave_disable_vote_cb(struct votable *votable, void *data,
 		return rc;
 	}
 
+	smb1390_dbg(chip, PR_INFO, "client: %s, slave: %s\n",
+			client, (disable ? "disabled" : "enabled"));
+
 	/* Re-distribute ILIM to Master CP when Slave is disabled */
 	if (disable && (chip->ilim_votable)) {
 		ilim_ua = get_effective_result_locked(chip->ilim_votable);
@@ -967,7 +973,7 @@ static int smb1390_ilim_vote_cb(struct votable *votable, void *data,
 			return rc;
 		}
 
-		smb1390_dbg(chip, PR_INFO, "ILIM set to %duA slave_enabled%d\n",
+		smb1390_dbg(chip, PR_INFO, "ILIM set to %duA slave_enabled = %d\n",
 						ilim_uA, slave_enabled);
 		vote(chip->disable_votable, ILIM_VOTER, false, 0);
 	}
diff --git a/drivers/power/supply/qcom/smb5-lib.c b/drivers/power/supply/qcom/smb5-lib.c
index db4f93b..0be8676 100644
--- a/drivers/power/supply/qcom/smb5-lib.c
+++ b/drivers/power/supply/qcom/smb5-lib.c
@@ -41,6 +41,7 @@
 	&& (!chg->typec_legacy || chg->typec_legacy_use_rp_icl))
 
 static void update_sw_icl_max(struct smb_charger *chg, int pst);
+static int smblib_get_prop_typec_mode(struct smb_charger *chg);
 
 int smblib_read(struct smb_charger *chg, u16 addr, u8 *val)
 {
@@ -1364,6 +1365,11 @@ int smblib_set_icl_current(struct smb_charger *chg, int icl_ua)
 	/* suspend if 25mA or less is requested */
 	bool suspend = (icl_ua <= USBIN_25MA);
 
+	/* Do not configure ICL from SW for DAM cables */
+	if (smblib_get_prop_typec_mode(chg) ==
+			    POWER_SUPPLY_TYPEC_SINK_DEBUG_ACCESSORY)
+		return 0;
+
 	if (suspend)
 		return smblib_set_usb_suspend(chg, true);
 
@@ -1910,14 +1916,16 @@ int smblib_get_prop_batt_status(struct smb_charger *chg,
 	u8 stat;
 	int rc, suspend = 0;
 
-	rc = smblib_get_prop_from_bms(chg,
-			POWER_SUPPLY_PROP_DEBUG_BATTERY, &pval);
-	if (rc < 0) {
-		pr_err_ratelimited("Couldn't get debug battery prop rc=%d\n",
-				rc);
-	} else if (pval.intval == 1) {
-		val->intval = POWER_SUPPLY_STATUS_UNKNOWN;
-		return 0;
+	if (chg->fake_chg_status_on_debug_batt) {
+		rc = smblib_get_prop_from_bms(chg,
+				POWER_SUPPLY_PROP_DEBUG_BATTERY, &pval);
+		if (rc < 0) {
+			pr_err_ratelimited("Couldn't get debug battery prop rc=%d\n",
+					rc);
+		} else if (pval.intval == 1) {
+			val->intval = POWER_SUPPLY_STATUS_UNKNOWN;
+			return 0;
+		}
 	}
 
 	if (chg->dbc_usbov) {
@@ -3585,6 +3593,10 @@ static int smblib_get_prop_ufp_mode(struct smb_charger *chg)
 		return POWER_SUPPLY_TYPEC_SOURCE_HIGH;
 	case SNK_RP_SHORT_BIT:
 		return POWER_SUPPLY_TYPEC_NON_COMPLIANT;
+	case SNK_DAM_500MA_BIT:
+	case SNK_DAM_1500MA_BIT:
+	case SNK_DAM_3000MA_BIT:
+		return POWER_SUPPLY_TYPEC_SINK_DEBUG_ACCESSORY;
 	default:
 		break;
 	}
@@ -3873,6 +3885,7 @@ int smblib_get_prop_smb_health(struct smb_charger *chg)
 	int rc;
 	u8 stat;
 	int input_present;
+	union power_supply_propval prop = {0, };
 
 	rc = smblib_is_input_present(chg, &input_present);
 	if (rc < 0)
@@ -3881,6 +3894,17 @@ int smblib_get_prop_smb_health(struct smb_charger *chg)
 	if (input_present == INPUT_NOT_PRESENT)
 		return POWER_SUPPLY_HEALTH_UNKNOWN;
 
+	/*
+	 * SMB health is used only for CP, report UNKNOWN if
+	 * switcher is not enabled.
+	 */
+	if (is_cp_available(chg)) {
+		rc = power_supply_get_property(chg->cp_psy,
+			POWER_SUPPLY_PROP_CP_SWITCHER_EN, &prop);
+		if (!rc && !prop.intval)
+			return POWER_SUPPLY_HEALTH_UNKNOWN;
+	}
+
 	if (chg->wa_flags & SW_THERM_REGULATION_WA) {
 		if (chg->smb_temp == -ENODATA)
 			return POWER_SUPPLY_HEALTH_UNKNOWN;
@@ -6336,9 +6360,8 @@ irqreturn_t dc_plugin_irq_handler(int irq, void *data)
 
 		/*
 		 * Remove USB's CP ILIM vote - inapplicable for wireless
-		 * parallel charging. Also undo FCC STEPPER's 1.5 A vote.
+		 * parallel charging.
 		 */
-		vote(chg->fcc_votable, FCC_STEPPER_VOTER, false, 0);
 		if (chg->cp_ilim_votable)
 			vote(chg->cp_ilim_votable, ICL_CHANGE_VOTER, false, 0);
 
@@ -6399,15 +6422,19 @@ irqreturn_t dc_plugin_irq_handler(int irq, void *data)
 		vote(chg->dc_suspend_votable, CHG_TERMINATION_VOTER, false, 0);
 		vote(chg->fcc_main_votable, WLS_PL_CHARGING_VOTER, false, 0);
 
-		/* Force 1500mA FCC on WLS removal if fcc stepper is enabled */
-		if (chg->fcc_stepper_enable)
-			vote(chg->fcc_votable, FCC_STEPPER_VOTER,
-							true, 1500000);
 		chg->last_wls_vout = 0;
 		chg->dcin_aicl_done = false;
 		chg->dcin_icl_user_set = false;
 	}
 
+	/*
+	 * Vote for 1500mA FCC upon WLS detach and remove vote upon attach if
+	 * FCC stepper is enabled.
+	 */
+	if (chg->fcc_stepper_enable && !vbus_present)
+		vote(chg->fcc_votable, FCC_STEPPER_VOTER, !dcin_present,
+				dcin_present ? 0 : 1500000);
+
 	if (chg->dc_psy)
 		power_supply_changed(chg->dc_psy);
 
diff --git a/drivers/power/supply/qcom/smb5-lib.h b/drivers/power/supply/qcom/smb5-lib.h
index 1473dac..c77e875 100644
--- a/drivers/power/supply/qcom/smb5-lib.h
+++ b/drivers/power/supply/qcom/smb5-lib.h
@@ -496,6 +496,7 @@ struct smb_charger {
 	int			connector_type;
 	bool			otg_en;
 	bool			suspend_input_on_debug_batt;
+	bool			fake_chg_status_on_debug_batt;
 	int			default_icl_ua;
 	int			otg_cl_ua;
 	bool			uusb_apsd_rerun_done;
diff --git a/drivers/power/supply/qcom/smb5-reg.h b/drivers/power/supply/qcom/smb5-reg.h
index 2363f24..a5fe691 100644
--- a/drivers/power/supply/qcom/smb5-reg.h
+++ b/drivers/power/supply/qcom/smb5-reg.h
@@ -333,7 +333,10 @@ enum {
  *  TYPEC Peripheral Registers  *
  ********************************/
 #define TYPE_C_SNK_STATUS_REG			(TYPEC_BASE + 0x06)
-#define DETECTED_SRC_TYPE_MASK			GENMASK(3, 0)
+#define DETECTED_SRC_TYPE_MASK			GENMASK(6, 0)
+#define SNK_DAM_500MA_BIT			BIT(6)
+#define SNK_DAM_1500MA_BIT			BIT(5)
+#define SNK_DAM_3000MA_BIT			BIT(4)
 #define SNK_RP_STD_BIT				BIT(3)
 #define SNK_RP_1P5_BIT				BIT(2)
 #define SNK_RP_3P0_BIT				BIT(1)
diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig
index 2e5f185..9dd9167 100644
--- a/drivers/scsi/ufs/Kconfig
+++ b/drivers/scsi/ufs/Kconfig
@@ -92,8 +92,6 @@
 	tristate "QCOM specific hooks to UFS controller platform driver"
 	depends on SCSI_UFSHCD_PLATFORM && ARCH_QCOM
 	select PHY_QCOM_UFS
-	select EXTCON
-	select EXTCON_STORAGE_CD_GPIO
 	help
 	  This selects the QCOM specific additions to UFSHCD platform driver.
 	  UFS host on QCOM needs some vendor specific configuration before
diff --git a/drivers/scsi/ufs/ufs-debugfs.c b/drivers/scsi/ufs/ufs-debugfs.c
index c94cdd9..f4481e0 100644
--- a/drivers/scsi/ufs/ufs-debugfs.c
+++ b/drivers/scsi/ufs/ufs-debugfs.c
@@ -1204,8 +1204,8 @@ static int ufsdbg_config_pwr_mode(struct ufs_hba *hba,
 	/* let's not get into low power until clock scaling is completed */
 	hba->ufs_stats.clk_hold.ctx = DBGFS_CFG_PWR_MODE;
 	ufshcd_hold(hba, false);
-	ufshcd_scsi_block_requests(hba);
 	down_write(&hba->lock);
+	ufshcd_scsi_block_requests(hba);
 	if (ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
 		ret = -EBUSY;
 		goto out;
@@ -1511,54 +1511,6 @@ static const struct file_operations ufsdbg_req_stats_desc = {
 	.write		= ufsdbg_req_stats_write,
 };
 
-
-static int ufsdbg_reset_controller_show(struct seq_file *file, void *data)
-{
-	seq_puts(file, "echo 1 > /sys/kernel/debug/.../reset_controller\n");
-	seq_puts(file, "resets the UFS controller and restores its operational state\n\n");
-
-	return 0;
-}
-
-static int ufsdbg_reset_controller_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, ufsdbg_reset_controller_show,
-						inode->i_private);
-}
-
-static ssize_t ufsdbg_reset_controller_write(struct file *filp,
-		const char __user *ubuf, size_t cnt, loff_t *ppos)
-{
-	struct ufs_hba *hba = filp->f_mapping->host->i_private;
-	unsigned long flags;
-
-	pm_runtime_get_sync(hba->dev);
-	ufshcd_hold(hba, false);
-
-	spin_lock_irqsave(hba->host->host_lock, flags);
-	/*
-	 * simulating a dummy error in order to "convince"
-	 * eh_work to actually reset the controller
-	 */
-	hba->saved_err |= INT_FATAL_ERRORS;
-	hba->silence_err_logs = true;
-	schedule_work(&hba->eh_work);
-	spin_unlock_irqrestore(hba->host->host_lock, flags);
-
-	flush_work(&hba->eh_work);
-
-	ufshcd_release(hba, false);
-	pm_runtime_put_sync(hba->dev);
-
-	return cnt;
-}
-
-static const struct file_operations ufsdbg_reset_controller = {
-	.open		= ufsdbg_reset_controller_open,
-	.read		= seq_read,
-	.write		= ufsdbg_reset_controller_write,
-};
-
 static int ufsdbg_clear_err_state(void *data, u64 val)
 {
 	struct ufs_hba *hba = data;
@@ -1747,17 +1699,6 @@ void ufsdbg_add_debugfs(struct ufs_hba *hba)
 		goto err;
 	}
 
-	hba->debugfs_files.reset_controller =
-		debugfs_create_file("reset_controller", 0600,
-			hba->debugfs_files.debugfs_root, hba,
-			&ufsdbg_reset_controller);
-	if (!hba->debugfs_files.reset_controller) {
-		dev_err(hba->dev,
-			"%s: failed create reset_controller debugfs entry\n",
-				__func__);
-		goto err;
-	}
-
 	hba->debugfs_files.err_state =
 		debugfs_create_file("err_state", 0600,
 			hba->debugfs_files.debugfs_root, hba,
diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c
index bd0415c..4f0d064 100644
--- a/drivers/scsi/ufs/ufshcd-pltfrm.c
+++ b/drivers/scsi/ufs/ufshcd-pltfrm.c
@@ -304,20 +304,6 @@ static int ufshcd_parse_pinctrl_info(struct ufs_hba *hba)
 	return ret;
 }
 
-static int ufshcd_parse_extcon_info(struct ufs_hba *hba)
-{
-	struct extcon_dev *extcon;
-
-	extcon = extcon_get_edev_by_phandle(hba->dev, 0);
-	if (IS_ERR(extcon) && PTR_ERR(extcon) != -ENODEV)
-		return PTR_ERR(extcon);
-
-	if (!IS_ERR(extcon))
-		hba->extcon = extcon;
-
-	return 0;
-}
-
 static void ufshcd_parse_gear_limits(struct ufs_hba *hba)
 {
 	struct device *dev = hba->dev;
@@ -527,9 +513,6 @@ int ufshcd_pltfrm_init(struct platform_device *pdev,
 	ufshcd_parse_gear_limits(hba);
 	ufshcd_parse_cmd_timeout(hba);
 	ufshcd_parse_force_g4_flag(hba);
-	err = ufshcd_parse_extcon_info(hba);
-	if (err)
-		goto dealloc_host;
 
 	if (!dev->dma_mask)
 		dev->dma_mask = &dev->coherent_dma_mask;
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 22c2174..dc642ff 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -364,26 +364,6 @@ ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl)
 	return ufs_pm_lvl_states[lvl].link_state;
 }
 
-static inline void ufshcd_set_card_online(struct ufs_hba *hba)
-{
-	atomic_set(&hba->card_state, UFS_CARD_STATE_ONLINE);
-}
-
-static inline void ufshcd_set_card_offline(struct ufs_hba *hba)
-{
-	atomic_set(&hba->card_state, UFS_CARD_STATE_OFFLINE);
-}
-
-static inline bool ufshcd_is_card_online(struct ufs_hba *hba)
-{
-	return (atomic_read(&hba->card_state) == UFS_CARD_STATE_ONLINE);
-}
-
-static inline bool ufshcd_is_card_offline(struct ufs_hba *hba)
-{
-	return (atomic_read(&hba->card_state) == UFS_CARD_STATE_OFFLINE);
-}
-
 static inline void ufshcd_wb_toggle_flush(struct ufs_hba *hba)
 {
 	/*
@@ -416,28 +396,6 @@ static inline void ufshcd_wb_config(struct ufs_hba *hba)
 			__func__, ret);
 }
 
-static inline bool ufshcd_is_device_offline(struct ufs_hba *hba)
-{
-	if (hba->extcon && ufshcd_is_card_offline(hba))
-		return true;
-	else
-		return false;
-}
-
-static int ufshcd_card_get_extcon_state(struct ufs_hba *hba)
-{
-	int ret;
-
-	if (!hba->extcon)
-		return -EINVAL;
-
-	ret = extcon_get_state(hba->extcon, EXTCON_MECHANICAL);
-	if (ret < 0)
-		dev_err(hba->dev, "%s: Failed to check card Extcon state, ret=%d\n",
-				 __func__, ret);
-	return ret;
-}
-
 static inline enum ufs_pm_level
 ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state,
 					enum uic_link_state link_state)
@@ -500,8 +458,6 @@ static struct ufs_dev_fix ufs_fixups[] = {
 		UFS_DEVICE_QUIRK_HS_G1_TO_HS_G3_SWITCH),
 	UFS_FIX(UFS_VENDOR_SKHYNIX, "hC8HL1",
 		UFS_DEVICE_QUIRK_HS_G1_TO_HS_G3_SWITCH),
-	UFS_FIX(UFS_VENDOR_SKHYNIX, "H9HQ16",
-		UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS),
 
 	END_FIX
 };
@@ -543,11 +499,6 @@ static int ufshcd_config_vreg(struct device *dev,
 				struct ufs_vreg *vreg, bool on);
 static int ufshcd_enable_vreg(struct device *dev, struct ufs_vreg *vreg);
 static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg);
-static void ufshcd_register_pm_notifier(struct ufs_hba *hba);
-static void ufshcd_unregister_pm_notifier(struct ufs_hba *hba);
-static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on);
-static void ufshcd_remove_scsi_devices(struct ufs_hba *hba);
-static void ufshcd_detect_card(struct ufs_hba *hba, unsigned long delay);
 
 #if IS_ENABLED(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND)
 static struct devfreq_simple_ondemand_data ufshcd_ondemand_data = {
@@ -1782,8 +1733,8 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
 	 * make sure that there are no outstanding requests when
 	 * clock scaling is in progress
 	 */
-	ufshcd_scsi_block_requests(hba);
 	down_write(&hba->lock);
+	ufshcd_scsi_block_requests(hba);
 	if (ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
 		ret = -EBUSY;
 		up_write(&hba->lock);
@@ -1812,9 +1763,6 @@ static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
 {
 	int ret = 0;
 
-	if (ufshcd_is_device_offline(hba))
-		return 0;
-
 	/* let's not get into low power until clock scaling is completed */
 	hba->ufs_stats.clk_hold.ctx = CLK_SCALE_WORK;
 	ufshcd_hold_all(hba);
@@ -2196,9 +2144,6 @@ static void ufshcd_ungate_work(struct work_struct *work)
 	ufshcd_hba_vreg_set_hpm(hba);
 	ufshcd_enable_clocks(hba);
 
-	if (ufshcd_is_device_offline(hba))
-		goto unblock_reqs;
-
 	/* Exit from hibern8 */
 	if (ufshcd_can_hibern8_during_gating(hba)) {
 		/* Prevent gating in this path */
@@ -2241,8 +2186,6 @@ int ufshcd_hold(struct ufs_hba *hba, bool async)
 start:
 	switch (hba->clk_gating.state) {
 	case CLKS_ON:
-		if (ufshcd_is_device_offline(hba))
-			break;
 		/*
 		 * Wait for the ungate work to complete if in progress.
 		 * Though the clocks may be in ON state, the link could
@@ -2346,9 +2289,6 @@ static void ufshcd_gate_work(struct work_struct *work)
 
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
 
-	if (ufshcd_is_device_offline(hba))
-		goto disable_clocks;
-
 	if (ufshcd_is_hibern8_on_idle_allowed(hba) &&
 	    hba->hibern8_on_idle.is_enabled)
 		/*
@@ -2368,7 +2308,6 @@ static void ufshcd_gate_work(struct work_struct *work)
 		ufshcd_set_link_hibern8(hba);
 	}
 
-disable_clocks:
 	/*
 	 * If auto hibern8 is enabled then the link will already
 	 * be in hibern8 state and the ref clock can be gated.
@@ -3072,8 +3011,6 @@ int ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
 {
 	hba->lrb[task_tag].issue_time_stamp = ktime_get();
 	hba->lrb[task_tag].compl_time_stamp = ktime_set(0, 0);
-	if (ufshcd_is_device_offline(hba))
-		return -ENOLINK;
 	ufshcd_clk_scaling_start_busy(hba);
 	__set_bit(task_tag, &hba->outstanding_reqs);
 	ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
@@ -3263,9 +3200,6 @@ __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
 		return -EIO;
 	}
 
-	if (ufshcd_is_device_offline(hba))
-		return -ENOLINK;
-
 	if (completion)
 		init_completion(&uic_cmd->done);
 
@@ -3756,12 +3690,6 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
 		goto out_unlock;
 	}
 
-	if (ufshcd_is_device_offline(hba)) {
-		set_host_byte(cmd, DID_BAD_TARGET);
-		cmd->scsi_done(cmd);
-		goto out_unlock;
-	}
-
 	switch (hba->ufshcd_state) {
 	case UFSHCD_STATE_OPERATIONAL:
 		break;
@@ -4092,9 +4020,6 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
 	unsigned long flags;
 	bool has_read_lock = false;
 
-	if (ufshcd_is_device_offline(hba))
-		return -ENOLINK;
-
 	/*
 	 * May get invoked from shutdown and IOCTL contexts.
 	 * In shutdown context, it comes in with lock acquired.
@@ -5250,7 +5175,7 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
 	ufshcd_dme_cmd_log(hba, "dme_cmpl_2", hba->active_uic_cmd->command);
 
 out:
-	if (ret && !ufshcd_is_device_offline(hba)) {
+	if (ret) {
 		ufsdbg_set_err_state(hba);
 		ufshcd_print_host_state(hba);
 		ufshcd_print_pwr_info(hba);
@@ -5310,9 +5235,6 @@ static int ufshcd_link_recovery(struct ufs_hba *hba)
 	int ret = 0;
 	unsigned long flags;
 
-	if (ufshcd_is_device_offline(hba))
-		return -ENOLINK;
-
 	/*
 	 * Check if there is any race with fatal error handling.
 	 * If so, wait for it to complete. Even though fatal error
@@ -5337,7 +5259,7 @@ static int ufshcd_link_recovery(struct ufs_hba *hba)
 	hba->ufshcd_state = UFSHCD_STATE_ERROR;
 	hba->force_host_reset = true;
 	ufshcd_set_eh_in_progress(hba);
-	schedule_work(&hba->eh_work);
+	queue_work(hba->recovery_wq, &hba->eh_work);
 
 	/* wait for the reset work to finish */
 	do {
@@ -5416,7 +5338,7 @@ int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
 
 	for (retries = UIC_HIBERN8_ENTER_RETRIES; retries > 0; retries--) {
 		ret = __ufshcd_uic_hibern8_enter(hba);
-		if (!ret || ufshcd_is_device_offline(hba))
+		if (!ret)
 			goto out;
 		else if (ret != -EAGAIN)
 			/* Unable to recover the link, so no point proceeding */
@@ -5448,7 +5370,7 @@ int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
 			__func__, ret);
 		ret = ufshcd_link_recovery(hba);
 		/* Unable to recover the link, so no point proceeding */
-		if (ret && !ufshcd_is_device_offline(hba))
+		if (ret)
 			BUG_ON(1);
 	} else {
 		ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT,
@@ -6021,14 +5943,6 @@ static int ufshcd_link_startup(struct ufs_hba *hba)
 out:
 	if (ret)
 		dev_err(hba->dev, "link startup failed %d\n", ret);
-	/*
-	 * For some external cards, link startup succeeds only after few link
-	 * startup attempts and err_state may get set in this case.
-	 * But as the link startup has finally succeded, we are clearing the
-	 * error state.
-	 */
-	else if (hba->extcon)
-		ufsdbg_clr_err_state(hba);
 
 	return ret;
 }
@@ -6383,7 +6297,8 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
 				 * to avoid deadlock between ufshcd_suspend
 				 * and exception event handler.
 				 */
-				if (schedule_work(&hba->eeh_work))
+				if (queue_work(hba->recovery_wq,
+							&hba->eeh_work))
 					pm_runtime_get_noresume(hba->dev);
 			}
 			break;
@@ -6492,7 +6407,7 @@ static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
 			 * to printout the debug messages.
 			 */
 			hba->auto_h8_err = true;
-			schedule_work(&hba->eh_work);
+			queue_work(hba->recovery_wq, &hba->eh_work);
 			retval = IRQ_HANDLED;
 		}
 	}
@@ -7241,17 +7156,6 @@ static void ufshcd_err_handler(struct work_struct *work)
 
 	hba = container_of(work, struct ufs_hba, eh_work);
 
-	if (ufshcd_is_device_offline(hba)) {
-		spin_lock_irqsave(hba->host->host_lock, flags);
-		hba->saved_err = 0;
-		hba->saved_uic_err = 0;
-		hba->saved_ce_err = 0;
-		hba->auto_h8_err = false;
-		hba->force_host_reset = false;
-		hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
-		goto out;
-	}
-
 	spin_lock_irqsave(hba->host->host_lock, flags);
 	ufsdbg_set_err_state(hba);
 
@@ -7441,12 +7345,11 @@ static void ufshcd_rls_handler(struct work_struct *work)
 
 	hba = container_of(work, struct ufs_hba, rls_work);
 
-	if (ufshcd_is_device_offline(hba))
-		return;
-
 	pm_runtime_get_sync(hba->dev);
-	ufshcd_scsi_block_requests(hba);
 	down_write(&hba->lock);
+	ufshcd_scsi_block_requests(hba);
+	if (ufshcd_is_shutdown_ongoing(hba))
+		goto out;
 	ret = ufshcd_wait_for_doorbell_clr(hba, U64_MAX);
 	if (ret) {
 		dev_err(hba->dev,
@@ -7526,7 +7429,7 @@ static irqreturn_t ufshcd_update_uic_error(struct ufs_hba *hba)
 				}
 			}
 			if (!hba->full_init_linereset)
-				schedule_work(&hba->rls_work);
+				queue_work(hba->recovery_wq, &hba->rls_work);
 		}
 		retval |= IRQ_HANDLED;
 	}
@@ -7613,10 +7516,7 @@ static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba)
 			queue_eh_work = true;
 	}
 
-	if (ufshcd_is_device_offline(hba)) {
-		/* ignore UIC errors if card is offline */
-		retval |= IRQ_HANDLED;
-	} else if (queue_eh_work) {
+	if (queue_eh_work) {
 		/*
 		 * update the transfer error masks to sticky bits, let's do this
 		 * irrespective of current ufshcd_state.
@@ -7635,7 +7535,7 @@ static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba)
 
 			hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED;
 
-			schedule_work(&hba->eh_work);
+			queue_work(hba->recovery_wq, &hba->eh_work);
 		}
 		retval |= IRQ_HANDLED;
 	}
@@ -7743,7 +7643,7 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba)
 		intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
 	} while (intr_status && --retries);
 
-	if (retval == IRQ_NONE && !ufshcd_is_device_offline(hba)) {
+	if (retval == IRQ_NONE) {
 		dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x\n",
 					__func__, intr_status);
 		ufshcd_hex_dump(hba, "host regs: ", hba->mmio_base,
@@ -8222,7 +8122,7 @@ static int ufshcd_reset_and_restore(struct ufs_hba *hba)
 	 * There is no point proceeding even after failing
 	 * to recover after multiple retries.
 	 */
-	BUG_ON(err && ufshcd_is_embedded_dev(hba) && !hba->extcon);
+	BUG_ON(err && ufshcd_is_embedded_dev(hba));
 
 	/*
 	 * After reset the door-bell might be cleared, complete
@@ -8273,7 +8173,7 @@ static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
 	 */
 	hba->ufshcd_state = UFSHCD_STATE_ERROR;
 	hba->force_host_reset = true;
-	schedule_work(&hba->eh_work);
+	queue_work(hba->recovery_wq, &hba->eh_work);
 
 	/* wait for the reset work to finish */
 	do {
@@ -9178,8 +9078,6 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
 
 		scsi_scan_host(hba->host);
 		pm_runtime_put_sync(hba->dev);
-		if (hba->extcon)
-			hba->card_rpm_paired = true;
 	}
 
 	/*
@@ -9198,11 +9096,8 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
 	 * If we failed to initialize the device or the device is not
 	 * present, turn off the power/clocks etc.
 	 */
-	if (ret && !ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
+	if (ret && !ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress)
 		pm_runtime_put_sync(hba->dev);
-		if (hba->extcon)
-			hba->card_rpm_paired = true;
-	}
 
 	trace_ufshcd_init(dev_name(hba->dev), ret,
 		ktime_to_us(ktime_sub(ktime_get(), start)),
@@ -9210,197 +9105,6 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
 	return ret;
 }
 
-static void ufshcd_remove_scsi_devices(struct ufs_hba *hba)
-{
-	struct Scsi_Host *shost = hba->host;
-	struct scsi_device *sdev;
-	unsigned long flags;
-
-	spin_lock_irqsave(shost->host_lock, flags);
-restart:
-	list_for_each_entry(sdev, &shost->__devices, siblings) {
-		if (sdev->sdev_state == SDEV_DEL ||
-		    sdev->sdev_state == SDEV_CANCEL ||
-		    !get_device(&sdev->sdev_gendev))
-			continue;
-		spin_unlock_irqrestore(shost->host_lock, flags);
-		scsi_remove_device(sdev);
-		put_device(&sdev->sdev_gendev);
-		spin_lock_irqsave(shost->host_lock, flags);
-		goto restart;
-	}
-	spin_unlock_irqrestore(shost->host_lock, flags);
-}
-
-static void ufshcd_remove_card(struct ufs_hba *hba)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(hba->host->host_lock, flags);
-	ufshcd_set_card_removal_ongoing(hba);
-	ufshcd_set_card_offline(hba);
-	spin_unlock_irqrestore(hba->host->host_lock, flags);
-	/* Turn on host vreg and clocks */
-	ufshcd_setup_hba_vreg(hba, true);
-	ufshcd_enable_clocks(hba);
-	/* Make sure clocks are stable */
-	usleep_range(50, 60);
-	spin_lock_irqsave(hba->host->host_lock, flags);
-	ufshcd_hba_stop(hba, false);
-	/* Clear interrupt status and disable interrupts */
-	ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS),
-		      REG_INTERRUPT_STATUS);
-	ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE);
-	/*
-	 * Make sure that UFS interrupts are disabled and
-	 * any pending interrupt status is cleared.
-	 */
-	mb();
-	hba->silence_err_logs = true;
-	/* Complete requests that have door-bell cleared by h/w */
-	ufshcd_complete_requests(hba);
-	/* Complete the sync/async UIC command if there is one */
-	if (hba->uic_async_done)
-		complete(hba->uic_async_done);
-	else if (hba->active_uic_cmd)
-		complete(&hba->active_uic_cmd->done);
-	spin_unlock_irqrestore(hba->host->host_lock, flags);
-	cancel_delayed_work_sync(&hba->card_detect_work);
-	/* Flush runtime PM events */
-	pm_runtime_get_sync(hba->dev);
-	/* Clear runtime PM errors if any */
-	pm_runtime_set_active(hba->dev);
-	cancel_work_sync(&hba->rls_work);
-	cancel_work_sync(&hba->eh_work);
-	cancel_work_sync(&hba->eeh_work);
-	hba->auto_bkops_enabled = false;
-	__ufshcd_shutdown_clkscaling(hba);
-	spin_lock_irqsave(hba->host->host_lock, flags);
-	ufshcd_clear_eh_in_progress(hba);
-	hba->saved_err = 0;
-	hba->saved_uic_err = 0;
-	hba->saved_ce_err = 0;
-	hba->auto_h8_err = false;
-	hba->force_host_reset = false;
-	hba->ufshcd_state = UFSHCD_STATE_RESET;
-	hba->silence_err_logs = false;
-	ufsdbg_clr_err_state(hba);
-	ufshcd_set_ufs_dev_poweroff(hba);
-	ufshcd_set_link_off(hba);
-	spin_unlock_irqrestore(hba->host->host_lock, flags);
-	/*
-	 * Remove scsi devices only when we are not in middle
-	 * of system resume events.
-	 */
-	if (!down_trylock(&hba->sdev_sema)) {
-		ufshcd_remove_scsi_devices(hba);
-		up(&hba->sdev_sema);
-	}
-	ufshcd_clear_card_removal_ongoing(hba);
-	pm_runtime_put_sync(hba->dev);
-}
-
-static void ufshcd_card_detect_handler(struct work_struct *work)
-{
-	struct ufs_hba *hba;
-	unsigned long flags;
-	int ret;
-
-	hba = container_of(to_delayed_work(work), struct ufs_hba,
-			   card_detect_work);
-
-	spin_lock_irqsave(hba->host->host_lock, flags);
-	if (!ufshcd_is_card_removal_ongoing(hba))
-		ufshcd_set_card_online(hba);
-	spin_unlock_irqrestore(hba->host->host_lock, flags);
-
-	if (ufshcd_is_card_online(hba) && !hba->sdev_ufs_device) {
-		pm_runtime_get_sync(hba->dev);
-		if (ufshcd_is_clkgating_allowed(hba)) {
-			spin_lock_irqsave(hba->host->host_lock, flags);
-			hba->clk_gating.active_reqs = 0;
-			spin_unlock_irqrestore(hba->host->host_lock, flags);
-		}
-		hba->card_rpm_paired = false;
-		ret = ufshcd_detect_device(hba);
-		if (ret) {
-			ufshcd_set_card_offline(hba);
-			ufsdbg_clr_err_state(hba);
-			dev_err(hba->dev, "%s: device detect failed: %d\n",
-				__func__, ret);
-		}
-
-		/*
-		 * pm_runtime_put_sync() may not be called if
-		 * failure happens before or inside ufshcd_probe_hba()
-		 */
-		if (!hba->card_rpm_paired) {
-			cancel_work_sync(&hba->eh_work);
-			pm_runtime_put_sync(hba->dev);
-		}
-	}
-}
-
-static void ufshcd_detect_card(struct ufs_hba *hba, unsigned long delay)
-{
-	if (hba->extcon && !hba->card_detect_disabled)
-		schedule_delayed_work(&hba->card_detect_work, delay);
-}
-
-static int ufshcd_card_detect_notifier(struct notifier_block *nb,
-				       unsigned long event, void *ptr)
-{
-	struct ufs_hba *hba = container_of(nb, struct ufs_hba, card_detect_nb);
-
-	/*
-	 * card insertion/removal are not frequent events and having this
-	 * message helps if there is some issue with card detection/removal.
-	 */
-	dev_info(hba->dev, "%s: card %s notification rcvd\n",
-		__func__, event ? "inserted" : "removed");
-
-	if (event)
-		ufshcd_detect_card(hba, msecs_to_jiffies(200));
-	else
-		ufshcd_remove_card(hba);
-
-	return NOTIFY_DONE;
-}
-
-static int ufshcd_extcon_register(struct ufs_hba *hba)
-{
-	int ret;
-
-	if (!hba->extcon)
-		return 0;
-
-	hba->card_detect_nb.notifier_call = ufshcd_card_detect_notifier;
-	ret = extcon_register_notifier(hba->extcon,
-				       EXTCON_MECHANICAL,
-				       &hba->card_detect_nb);
-	if (ret)
-		dev_err(hba->dev, "%s: extcon_register_notifier() failed, ret %d\n",
-			__func__, ret);
-
-	return ret;
-}
-
-static int ufshcd_extcon_unregister(struct ufs_hba *hba)
-{
-	int ret;
-
-	if (!hba->extcon)
-		return 0;
-
-	ret = extcon_unregister_notifier(hba->extcon, EXTCON_MECHANICAL,
-					 &hba->card_detect_nb);
-	if (ret)
-		dev_err(hba->dev, "%s: extcon_unregister_notifier() failed, ret %d\n",
-			__func__, ret);
-
-	return ret;
-}
-
 /**
  * ufshcd_async_scan - asynchronous execution for probing hba
  * @data: data pointer to pass to this function
@@ -9410,24 +9114,13 @@ static void ufshcd_async_scan(void *data, async_cookie_t cookie)
 {
 	struct ufs_hba *hba = (struct ufs_hba *)data;
 
-	if (hba->extcon) {
-		ufshcd_hba_stop(hba, true);
-		ufshcd_set_ufs_dev_poweroff(hba);
-		ufshcd_set_link_off(hba);
-		ufshcd_set_card_offline(hba);
-		pm_runtime_put_sync(hba->dev);
-		ufshcd_extcon_register(hba);
-		if (ufshcd_card_get_extcon_state(hba) > 0)
-			ufshcd_detect_card(hba, 0);
-	} else {
-		/*
-		 * Don't allow clock gating and hibern8 enter for faster device
-		 * detection.
-		 */
-		ufshcd_hold_all(hba);
-		ufshcd_probe_hba(hba);
-		ufshcd_release_all(hba);
-	}
+	/*
+	 * Don't allow clock gating and hibern8 enter for faster device
+	 * detection.
+	 */
+	ufshcd_hold_all(hba);
+	ufshcd_probe_hba(hba);
+	ufshcd_release_all(hba);
 }
 
 static enum blk_eh_timer_return ufshcd_eh_timed_out(struct scsi_cmnd *scmd)
@@ -9909,12 +9602,6 @@ static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on)
 	struct ufs_vreg_info *info = &hba->vreg_info;
 	int ret = 0;
 
-	if (hba->extcon)
-		mutex_lock(&hba->card_mutex);
-
-	if (!on && ufshcd_is_card_removal_ongoing(hba))
-		goto out;
-
 	if (info->vdd_hba) {
 		ret = ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);
 
@@ -9922,9 +9609,6 @@ static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on)
 			ufshcd_vops_update_sec_cfg(hba, on);
 	}
 
-out:
-	if (hba->extcon)
-		mutex_unlock(&hba->card_mutex);
 	return ret;
 }
 
@@ -10018,19 +9702,13 @@ static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
 	bool clk_state_changed = false;
 
 	if (list_empty(head))
-		return ret;
-
-	if (hba->extcon)
-		mutex_lock(&hba->card_mutex);
-
-	if (!on && ufshcd_is_card_removal_ongoing(hba))
-		goto out_unlock;
+		goto out;
 
 	/* call vendor specific bus vote before enabling the clocks */
 	if (on) {
 		ret = ufshcd_vops_set_bus_vote(hba, on);
 		if (ret)
-			goto out_unlock;
+			return ret;
 	}
 
 	/*
@@ -10041,7 +9719,7 @@ static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
 	if (!on) {
 		ret = ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE);
 		if (ret)
-			goto out_unlock;
+			return ret;
 	}
 
 	list_for_each_entry(clki, head, list) {
@@ -10080,7 +9758,7 @@ static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
 	if (on) {
 		ret = ufshcd_vops_setup_clocks(hba, on, POST_CHANGE);
 		if (ret)
-			goto out;
+			return ret;
 	}
 
 	/*
@@ -10113,9 +9791,6 @@ static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
 		trace_ufshcd_profile_clk_gating(dev_name(hba->dev),
 			(on ? "on" : "off"),
 			ktime_to_us(ktime_sub(ktime_get(), start)), ret);
-out_unlock:
-	if (hba->extcon)
-		mutex_unlock(&hba->card_mutex);
 	return ret;
 }
 
@@ -10253,7 +9928,6 @@ static int ufshcd_hba_init(struct ufs_hba *hba)
 static void ufshcd_hba_exit(struct ufs_hba *hba)
 {
 	if (hba->is_powered) {
-		ufshcd_extcon_unregister(hba);
 		ufshcd_variant_hba_exit(hba);
 		ufshcd_setup_vreg(hba, false);
 		if (ufshcd_is_clkscaling_supported(hba)) {
@@ -10263,6 +9937,9 @@ static void ufshcd_hba_exit(struct ufs_hba *hba)
 				destroy_workqueue(hba->clk_scaling.workq);
 			ufshcd_devfreq_remove(hba);
 		}
+
+		if (hba->recovery_wq)
+			destroy_workqueue(hba->recovery_wq);
 		ufshcd_disable_clocks(hba, false);
 		ufshcd_setup_hba_vreg(hba, false);
 		hba->is_powered = false;
@@ -10536,55 +10213,6 @@ static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba)
 		ufshcd_setup_hba_vreg(hba, true);
 }
 
-#ifdef CONFIG_PM_SLEEP
-static int ufshcd_pm_notify(struct notifier_block *notify_block,
-			 unsigned long mode, void *unused)
-{
-	struct ufs_hba *hba = container_of(
-		notify_block, struct ufs_hba, pm_notify);
-
-	if (!hba->extcon)
-		return 0;
-
-	switch (mode) {
-	case PM_SUSPEND_PREPARE:
-		hba->card_detect_disabled = true;
-		cancel_delayed_work_sync(&hba->card_detect_work);
-		down(&hba->sdev_sema);
-		break;
-	case PM_POST_SUSPEND:
-		if (ufshcd_is_card_offline(hba) && hba->sdev_ufs_device)
-			ufshcd_remove_scsi_devices(hba);
-		up(&hba->sdev_sema);
-		hba->card_detect_disabled = false;
-		if (ufshcd_card_get_extcon_state(hba) > 0 &&
-		    !hba->sdev_ufs_device)
-			ufshcd_detect_card(hba, 0);
-	}
-
-	return 0;
-}
-
-static void ufshcd_register_pm_notifier(struct ufs_hba *hba)
-{
-	hba->pm_notify.notifier_call = ufshcd_pm_notify;
-	register_pm_notifier(&hba->pm_notify);
-}
-
-static void ufshcd_unregister_pm_notifier(struct ufs_hba *hba)
-{
-	unregister_pm_notifier(&hba->pm_notify);
-}
-#else
-static void ufshcd_register_pm_notifier(struct ufs_hba *hba)
-{
-}
-
-static void ufshcd_unregister_pm_notifier(struct ufs_hba *hba)
-{
-}
-#endif /* CONFIG_PM_SLEEP */
-
 /**
  * ufshcd_suspend - helper function for suspend operations
  * @hba: per adapter instance
@@ -10796,11 +10424,6 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 	if (ret)
 		goto disable_vreg;
 
-	if (hba->extcon &&
-	    (ufshcd_is_card_offline(hba) ||
-	     (ufshcd_is_card_online(hba) && !hba->sdev_ufs_device)))
-		goto skip_dev_ops;
-
 	if (ufshcd_is_link_hibern8(hba)) {
 		ret = ufshcd_uic_hibern8_exit(hba);
 		if (!ret) {
@@ -10849,7 +10472,6 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 	if (hba->clk_scaling.is_allowed)
 		ufshcd_resume_clkscaling(hba);
 
-skip_dev_ops:
 	/* Set Auto-Hibernate timer if supported */
 	ufshcd_set_auto_hibern8_timer(hba);
 
@@ -11091,11 +10713,6 @@ int ufshcd_shutdown(struct ufs_hba *hba)
 	if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
 		goto out;
 
-	if (hba->extcon) {
-		hba->card_detect_disabled = true;
-		cancel_delayed_work_sync(&hba->card_detect_work);
-	}
-
 	pm_runtime_get_sync(hba->dev);
 	ufshcd_hold_all(hba);
 	ufshcd_mark_shutdown_ongoing(hba);
@@ -11146,7 +10763,6 @@ void ufshcd_remove(struct ufs_hba *hba)
 	}
 	ufshcd_hba_exit(hba);
 	ufsdbg_remove_debugfs(hba);
-	ufshcd_unregister_pm_notifier(hba);
 }
 EXPORT_SYMBOL_GPL(ufshcd_remove);
 
@@ -11233,6 +10849,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
 	int err;
 	struct Scsi_Host *host = hba->host;
 	struct device *dev = hba->dev;
+	char recovery_wq_name[sizeof("ufs_recovery_wq_00")];
 
 	if (!mmio_base) {
 		dev_err(hba->dev,
@@ -11304,14 +10921,20 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
 	init_waitqueue_head(&hba->tm_tag_wq);
 
 	/* Initialize work queues */
+	snprintf(recovery_wq_name, ARRAY_SIZE(recovery_wq_name), "%s_%d",
+				"ufs_recovery_wq", host->host_no);
+	hba->recovery_wq = create_singlethread_workqueue(recovery_wq_name);
+	if (!hba->recovery_wq) {
+		dev_err(hba->dev, "%s: failed to create the workqueue\n",
+				__func__);
+		err = -ENOMEM;
+		goto out_disable;
+	}
+
 	INIT_WORK(&hba->eh_work, ufshcd_err_handler);
 	INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
-	INIT_DELAYED_WORK(&hba->card_detect_work, ufshcd_card_detect_handler);
 	INIT_WORK(&hba->rls_work, ufshcd_rls_handler);
 
-	sema_init(&hba->sdev_sema, 1);
-	mutex_init(&hba->card_mutex);
-
 	/* Initialize UIC command mutex */
 	mutex_init(&hba->uic_cmd_mutex);
 
@@ -11427,7 +11050,6 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
 
 	ufs_sysfs_add_nodes(hba->dev);
 
-	ufshcd_register_pm_notifier(hba);
 	return 0;
 
 out_remove_scsi_host:
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 17b9f7d..a0b8a82 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -711,13 +711,6 @@ struct ufshcd_cmd_log {
 	u32 seq_num;
 };
 
-/* UFS card state - hotplug state */
-enum ufshcd_card_state {
-	UFS_CARD_STATE_UNKNOWN	= 0,
-	UFS_CARD_STATE_ONLINE	= 1,
-	UFS_CARD_STATE_OFFLINE	= 2,
-};
-
 /**
  * struct ufs_hba - per adapter private structure
  * @mmio_base: UFSHCI base register address
@@ -752,6 +745,7 @@ enum ufshcd_card_state {
  * @intr_mask: Interrupt Mask Bits
  * @ee_ctrl_mask: Exception event control mask
  * @is_powered: flag to check if HBA is powered
+ * @recovery_wq: Work queue for all recovery workers
  * @eh_work: Worker to handle UFS errors that require s/w attention
  * @eeh_work: Worker to handle exception events
  * @errors: HBA errors
@@ -765,16 +759,6 @@ enum ufshcd_card_state {
  * @debugfs_files: debugfs files associated with the ufs stats
  * @ufshcd_dbg_print: Bitmask for enabling debug prints
  * @extcon: pointer to external connector device
- * @card_detect_nb: card detector notifier registered with @extcon
- * @card_detect_work: work to exectute the card detect function
- * @card_state: card state event, enum ufshcd_card_state defines possible states
- * @card_removal_in_prog: flag to track card removal progress
- * @pm_notify: used to register for PM events
- * @sdev_sema: semaphore to protect scsi devices from being removed
- * @card_mutex: mutex to serialize ON/OFF sequences of hba vregs and clocks
- * @card_rpm_paired: indicates whether runtime PM events are paired after card
- *  detection is finished
- * @card_detect_disabled: to enable/disable card detect
  * @vreg_info: UFS device voltage regulator information
  * @clk_list_head: UFS host controller clocks list node head
  * @pwr_info: holds current power mode
@@ -958,6 +942,7 @@ struct ufs_hba {
 	bool is_powered;
 
 	/* Work Queues */
+	struct workqueue_struct *recovery_wq;
 	struct work_struct eh_work;
 	struct work_struct eeh_work;
 	struct work_struct rls_work;
@@ -1009,17 +994,6 @@ struct ufs_hba {
 	/* Bitmask for enabling debug prints */
 	u32 ufshcd_dbg_print;
 
-	struct extcon_dev *extcon;
-	struct notifier_block card_detect_nb;
-	struct delayed_work card_detect_work;
-	atomic_t card_state;
-	unsigned long card_removal_in_prog;
-	struct notifier_block pm_notify;
-	struct semaphore sdev_sema;
-	struct mutex card_mutex;
-	bool card_rpm_paired;
-	bool card_detect_disabled;
-
 	struct ufs_pa_layer_attr pwr_info;
 	struct ufs_pwr_mode_info max_pwr_info;
 
@@ -1088,21 +1062,6 @@ struct ufs_hba {
 	bool wb_enabled;
 };
 
-static inline void ufshcd_set_card_removal_ongoing(struct ufs_hba *hba)
-{
-	set_bit(0, &hba->card_removal_in_prog);
-}
-
-static inline void ufshcd_clear_card_removal_ongoing(struct ufs_hba *hba)
-{
-	clear_bit(0, &hba->card_removal_in_prog);
-}
-
-static inline bool ufshcd_is_card_removal_ongoing(struct ufs_hba *hba)
-{
-	return !!(test_bit(0, &hba->card_removal_in_prog));
-}
-
 static inline void ufshcd_mark_shutdown_ongoing(struct ufs_hba *hba)
 {
 	set_bit(0, &hba->shutdown_in_prog);
diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c
index c1e399a..88ce872 100644
--- a/drivers/soc/qcom/icnss.c
+++ b/drivers/soc/qcom/icnss.c
@@ -44,6 +44,7 @@
 #include <soc/qcom/service-notifier.h>
 #include <soc/qcom/socinfo.h>
 #include <soc/qcom/ramdump.h>
+#include <soc/qcom/scm.h>
 #include "icnss_private.h"
 #include "icnss_qmi.h"
 
@@ -94,6 +95,7 @@ static struct icnss_clk_info icnss_clk_info[] = {
 };
 
 #define ICNSS_CLK_INFO_SIZE		ARRAY_SIZE(icnss_clk_info)
+#define ICNSS_UTIL_GET_SEC_DUMP_STATE  0x10
 
 enum icnss_pdr_cause_index {
 	ICNSS_FW_CRASH,
@@ -1417,6 +1419,26 @@ static void icnss_update_state_send_modem_shutdown(struct icnss_priv *priv,
 	}
 }
 
+static bool icnss_is_mem_dump_allowed(void)
+{
+	struct scm_desc desc = {0};
+	int ret = 0;
+
+	desc.args[0] = 0;
+	desc.arginfo = 0;
+	ret = scm_call2(
+		SCM_SIP_FNID(SCM_SVC_UTIL, ICNSS_UTIL_GET_SEC_DUMP_STATE),
+		&desc);
+
+	if (ret) {
+		icnss_pr_err("SCM DUMP_STATE call failed\n");
+		return false;
+	}
+
+	icnss_pr_dbg("Dump State: %llu\n", desc.ret[0]);
+	return (desc.ret[0] == 1);
+}
+
 static int icnss_modem_notifier_nb(struct notifier_block *nb,
 				  unsigned long code,
 				  void *data)
@@ -1431,8 +1453,10 @@ static int icnss_modem_notifier_nb(struct notifier_block *nb,
 
 	if (code == SUBSYS_AFTER_SHUTDOWN &&
 	    notif->crashed == CRASH_STATUS_ERR_FATAL) {
-		icnss_pr_info("Collecting msa0 segment dump\n");
-		icnss_msa0_ramdump(priv);
+		if (icnss_is_mem_dump_allowed()) {
+			icnss_pr_info("Collecting msa0 segment dump\n");
+			icnss_msa0_ramdump(priv);
+		}
 		return NOTIFY_OK;
 	}
 
diff --git a/drivers/soc/qcom/ramdump.c b/drivers/soc/qcom/ramdump.c
index c2f8993..4746a9f 100644
--- a/drivers/soc/qcom/ramdump.c
+++ b/drivers/soc/qcom/ramdump.c
@@ -507,19 +507,19 @@ static int _do_ramdump(void *handle, struct ramdump_segment *segments,
 }
 
 static inline unsigned int set_section_name(const char *name,
-					    struct elfhdr *ehdr)
+					    struct elfhdr *ehdr,
+					    int *strtable_idx)
 {
 	char *strtab = elf_str_table(ehdr);
-	static int strtable_idx = 1;
 	int idx, ret = 0;
 
-	idx = strtable_idx;
+	idx = *strtable_idx;
 	if ((strtab == NULL) || (name == NULL))
 		return 0;
 
 	ret = idx;
 	idx += strlcpy((strtab + idx), name, MAX_NAME_LENGTH);
-	strtable_idx = idx + 1;
+	*strtable_idx = idx + 1;
 
 	return ret;
 }
@@ -533,6 +533,7 @@ static int _do_minidump(void *handle, struct ramdump_segment *segments,
 	struct elfhdr *ehdr;
 	struct elf_shdr *shdr;
 	unsigned long offset, strtbl_off;
+	int strtable_idx = 1;
 
 	/*
 	 * Acquire the consumer lock here, and hold the lock until we are done
@@ -588,13 +589,14 @@ static int _do_minidump(void *handle, struct ramdump_segment *segments,
 	shdr->sh_size = MAX_STRTBL_SIZE;
 	shdr->sh_entsize = 0;
 	shdr->sh_flags = 0;
-	shdr->sh_name = set_section_name("STR_TBL", ehdr);
+	shdr->sh_name = set_section_name("STR_TBL", ehdr, &strtable_idx);
 	shdr++;
 
 	for (i = 0; i < nsegments; i++, shdr++) {
 		/* Update elf header */
 		shdr->sh_type = SHT_PROGBITS;
-		shdr->sh_name = set_section_name(segments[i].name, ehdr);
+		shdr->sh_name = set_section_name(segments[i].name, ehdr,
+							&strtable_idx);
 		shdr->sh_addr = (elf_addr_t)segments[i].address;
 		shdr->sh_size = segments[i].size;
 		shdr->sh_flags = SHF_WRITE;
diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c
index 1ba689d..f330ef3 100644
--- a/drivers/soc/qcom/socinfo.c
+++ b/drivers/soc/qcom/socinfo.c
@@ -323,6 +323,9 @@ static struct msm_soc_info cpu_of_id[] = {
 	/* Bengal ID */
 	[417] = {MSM_CPU_BENGAL, "BENGAL"},
 
+	/* Lagoon ID */
+	[434] = {MSM_CPU_LAGOON, "LAGOON"},
+
 	/* Uninitialized IDs are not known to run Linux.
 	 * MSM_CPU_UNKNOWN is set to 0 to ensure these IDs are
 	 * considered as unknown CPU.
@@ -1194,6 +1197,10 @@ static void * __init setup_dummy_socinfo(void)
 		dummy_socinfo.id = 417;
 		strlcpy(dummy_socinfo.build_id, "bengal - ",
 		sizeof(dummy_socinfo.build_id));
+	} else if (early_machine_is_lagoon()) {
+		dummy_socinfo.id = 434;
+		strlcpy(dummy_socinfo.build_id, "lagoon - ",
+		sizeof(dummy_socinfo.build_id));
 	} else if (early_machine_is_sdmshrike()) {
 		dummy_socinfo.id = 340;
 		strlcpy(dummy_socinfo.build_id, "sdmshrike - ",
diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c
index e704a96..373e5f0 100644
--- a/drivers/spi/spi-geni-qcom.c
+++ b/drivers/spi/spi-geni-qcom.c
@@ -397,7 +397,7 @@ static struct msm_gpi_tre *setup_go_tre(int cmd, int cs, int rx_len, int flags,
 	if (cmd == SPI_RX_ONLY) {
 		eot = 0;
 		chain = 0;
-		eob = 1;
+		eob = 0;
 	} else {
 		eot = 0;
 		chain = 1;
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index cb384fc..5ba494e5 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -306,7 +306,7 @@ static void thermal_zone_device_set_polling(struct workqueue_struct *queue,
 		mod_delayed_work(queue, &tz->poll_queue,
 				 msecs_to_jiffies(delay));
 	else
-		cancel_delayed_work_sync(&tz->poll_queue);
+		cancel_delayed_work(&tz->poll_queue);
 }
 
 static void monitor_thermal_zone(struct thermal_zone_device *tz)
@@ -1438,7 +1438,7 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz)
 
 	mutex_unlock(&thermal_list_lock);
 
-	thermal_zone_device_set_polling(NULL, tz, 0);
+	cancel_delayed_work_sync(&tz->poll_queue);
 
 	thermal_set_governor(tz, NULL);
 
diff --git a/drivers/usb/gadget/function/f_mtp.c b/drivers/usb/gadget/function/f_mtp.c
index d7805e7..ad6d4eb 100644
--- a/drivers/usb/gadget/function/f_mtp.c
+++ b/drivers/usb/gadget/function/f_mtp.c
@@ -150,7 +150,6 @@ struct mtp_dev {
 	} perf[MAX_ITERATION];
 	unsigned int dbg_read_index;
 	unsigned int dbg_write_index;
-	struct mutex  read_mutex;
 };
 
 static void *_mtp_ipc_log;
@@ -650,18 +649,11 @@ static ssize_t mtp_read(struct file *fp, char __user *buf,
 	dev->state = STATE_BUSY;
 	spin_unlock_irq(&dev->lock);
 
-	mutex_lock(&dev->read_mutex);
-	if (dev->state == STATE_OFFLINE) {
-		r = -EIO;
-		mutex_unlock(&dev->read_mutex);
-		goto done;
-	}
 requeue_req:
 	/* queue a request */
 	req = dev->rx_req[0];
 	req->length = len;
 	dev->rx_done = 0;
-	mutex_unlock(&dev->read_mutex);
 	ret = usb_ep_queue(dev->ep_out, req, GFP_KERNEL);
 	if (ret < 0) {
 		r = -EIO;
@@ -687,7 +679,6 @@ static ssize_t mtp_read(struct file *fp, char __user *buf,
 		usb_ep_dequeue(dev->ep_out, req);
 		goto done;
 	}
-	mutex_lock(&dev->read_mutex);
 	if (dev->state == STATE_BUSY) {
 		/* If we got a 0-len packet, throw it back and try again. */
 		if (req->actual == 0)
@@ -701,7 +692,6 @@ static ssize_t mtp_read(struct file *fp, char __user *buf,
 	} else
 		r = -EIO;
 
-	mutex_unlock(&dev->read_mutex);
 done:
 	spin_lock_irq(&dev->lock);
 	if (dev->state == STATE_CANCELED)
@@ -949,12 +939,6 @@ static void receive_file_work(struct work_struct *data)
 
 	while (count > 0 || write_req) {
 		if (count > 0) {
-			mutex_lock(&dev->read_mutex);
-			if (dev->state == STATE_OFFLINE) {
-				r = -EIO;
-				mutex_unlock(&dev->read_mutex);
-				break;
-			}
 			/* queue a request */
 			read_req = dev->rx_req[cur_buf];
 			cur_buf = (cur_buf + 1) % RX_REQ_MAX;
@@ -963,7 +947,6 @@ static void receive_file_work(struct work_struct *data)
 			read_req->length = mtp_rx_req_len;
 
 			dev->rx_done = 0;
-			mutex_unlock(&dev->read_mutex);
 			ret = usb_ep_queue(dev->ep_out, read_req, GFP_KERNEL);
 			if (ret < 0) {
 				r = -EIO;
@@ -976,25 +959,17 @@ static void receive_file_work(struct work_struct *data)
 		if (write_req) {
 			mtp_log("rx %pK %d\n", write_req, write_req->actual);
 			start_time = ktime_get();
-			mutex_lock(&dev->read_mutex);
-			if (dev->state == STATE_OFFLINE) {
-				r = -EIO;
-				mutex_unlock(&dev->read_mutex);
-				break;
-			}
 			ret = vfs_write(filp, write_req->buf, write_req->actual,
 				&offset);
 			mtp_log("vfs_write %d\n", ret);
 			if (ret != write_req->actual) {
 				r = -EIO;
-				mutex_unlock(&dev->read_mutex);
 				if (dev->state != STATE_OFFLINE)
 					dev->state = STATE_ERROR;
 				if (read_req && !dev->rx_done)
 					usb_ep_dequeue(dev->ep_out, read_req);
 				break;
 			}
-			mutex_unlock(&dev->read_mutex);
 			dev->perf[dev->dbg_write_index].vfs_wtime =
 				ktime_to_us(ktime_sub(ktime_get(), start_time));
 			dev->perf[dev->dbg_write_index].vfs_wbytes = ret;
@@ -1022,12 +997,6 @@ static void receive_file_work(struct work_struct *data)
 				break;
 			}
 
-			mutex_lock(&dev->read_mutex);
-			if (dev->state == STATE_OFFLINE) {
-				r = -EIO;
-				mutex_unlock(&dev->read_mutex);
-				break;
-			}
 			/* Check if we aligned the size due to MTU constraint */
 			if (count < read_req->length)
 				read_req->actual = (read_req->actual > count ?
@@ -1048,7 +1017,6 @@ static void receive_file_work(struct work_struct *data)
 
 			write_req = read_req;
 			read_req = NULL;
-			mutex_unlock(&dev->read_mutex);
 		}
 	}
 
@@ -1501,14 +1469,12 @@ mtp_function_unbind(struct usb_configuration *c, struct usb_function *f)
 	fi_mtp = container_of(f->fi, struct mtp_instance, func_inst);
 	mtp_string_defs[INTERFACE_STRING_INDEX].id = 0;
 	mtp_log("dev: %pK\n", dev);
-	mutex_lock(&dev->read_mutex);
 	while ((req = mtp_req_get(dev, &dev->tx_idle)))
 		mtp_request_free(req, dev->ep_in);
 	for (i = 0; i < RX_REQ_MAX; i++)
 		mtp_request_free(dev->rx_req[i], dev->ep_out);
 	while ((req = mtp_req_get(dev, &dev->intr_idle)))
 		mtp_request_free(req, dev->ep_intr);
-	mutex_unlock(&dev->read_mutex);
 	spin_lock_irq(&dev->lock);
 	dev->state = STATE_OFFLINE;
 	dev->cdev = NULL;
@@ -1853,8 +1819,6 @@ struct usb_function_instance *alloc_inst_mtp_ptp(bool mtp_config)
 	usb_os_desc_prepare_interf_dir(&fi_mtp->func_inst.group, 1,
 					descs, names, THIS_MODULE);
 
-	mutex_init(&fi_mtp->dev->read_mutex);
-
 	return  &fi_mtp->func_inst;
 }
 EXPORT_SYMBOL_GPL(alloc_inst_mtp_ptp);
diff --git a/include/drm/drm_panel.h b/include/drm/drm_panel.h
index 7f48b7f..f00a53b 100644
--- a/include/drm/drm_panel.h
+++ b/include/drm/drm_panel.h
@@ -38,10 +38,14 @@ enum {
 	DRM_PANEL_BLANK_UNBLANK,
 	/* panel: power off */
 	DRM_PANEL_BLANK_POWERDOWN,
+	/* panel: low power mode */
+	DRM_PANEL_BLANK_LP,
 };
 
 struct drm_panel_notifier {
+	int refresh_rate;
 	void *data;
+	uint32_t id;
 };
 
 struct device_node;
diff --git a/include/dt-bindings/clock/qcom,gcc-lagoon.h b/include/dt-bindings/clock/qcom,gcc-lagoon.h
index c3aeb8d..61d10ed 100644
--- a/include/dt-bindings/clock/qcom,gcc-lagoon.h
+++ b/include/dt-bindings/clock/qcom,gcc-lagoon.h
@@ -150,6 +150,8 @@
 #define GCC_AGGRE_UFS_PHY_AXI_HW_CTL_CLK	140
 #define GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK	141
 #define GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK		142
+#define GCC_RX5_PCIE_CLKREF_CLK			143
+#define GCC_GPU_GPLL0_MAIN_DIV_CLK_SRC		144
 
 /* GCC resets */
 #define GCC_QUSB2PHY_PRIM_BCR			0
@@ -162,5 +164,7 @@
 #define GCC_PCIE_0_PHY_BCR			7
 #define GCC_QUPV3_WRAPPER_0_BCR			8
 #define GCC_QUPV3_WRAPPER_1_BCR			9
+#define GCC_USB3_PHY_PRIM_BCR			10
+#define GCC_USB3_DP_PHY_PRIM_BCR		11
 
 #endif
diff --git a/include/dt-bindings/clock/qcom,gpucc-lagoon.h b/include/dt-bindings/clock/qcom,gpucc-lagoon.h
index 6ad5527..b60de34 100644
--- a/include/dt-bindings/clock/qcom,gpucc-lagoon.h
+++ b/include/dt-bindings/clock/qcom,gpucc-lagoon.h
@@ -28,5 +28,6 @@
 #define GPU_CC_GX_GMU_CLK					18
 #define GPU_CC_GX_VSENSE_CLK					19
 #define GPU_CC_SLEEP_CLK					20
+#define GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK				21
 
 #endif
diff --git a/include/dt-bindings/clock/qcom,videocc-lagoon.h b/include/dt-bindings/clock/qcom,videocc-lagoon.h
index 0508249..8babbbf 100644
--- a/include/dt-bindings/clock/qcom,videocc-lagoon.h
+++ b/include/dt-bindings/clock/qcom,videocc-lagoon.h
@@ -21,4 +21,9 @@
 #define VIDEO_CC_XO_CLK			11
 #define VIDEO_CC_XO_CLK_SRC		12
 
+/* VIDEO_CC resets */
+#define VCODEC_VIDEO_CC_INTERFACE_BCR	0
+#define VCODEC_VIDEO_CC_MVS0_BCR	1
+#define VCODEC_VIDEO_CC_MVSC_BCR	2
+
 #endif
diff --git a/include/dt-bindings/msm/msm-camera.h b/include/dt-bindings/msm/msm-camera.h
index 75abc8a..07817a7 100644
--- a/include/dt-bindings/msm/msm-camera.h
+++ b/include/dt-bindings/msm/msm-camera.h
@@ -31,6 +31,15 @@
 #define CAM_CPAS_PATH_DATA_IPE_MAX_OFFSET \
 	(CAM_CPAS_PATH_DATA_IPE_START_OFFSET + 31)
 
+#define CAM_CPAS_PATH_DATA_OPE_START_OFFSET 64
+#define CAM_CPAS_PATH_DATA_OPE_RD_IN (CAM_CPAS_PATH_DATA_OPE_START_OFFSET + 0)
+#define CAM_CPAS_PATH_DATA_OPE_RD_REF (CAM_CPAS_PATH_DATA_OPE_START_OFFSET + 1)
+#define CAM_CPAS_PATH_DATA_OPE_WR_VID (CAM_CPAS_PATH_DATA_OPE_START_OFFSET + 2)
+#define CAM_CPAS_PATH_DATA_OPE_WR_DISP (CAM_CPAS_PATH_DATA_OPE_START_OFFSET + 3)
+#define CAM_CPAS_PATH_DATA_OPE_WR_REF (CAM_CPAS_PATH_DATA_OPE_START_OFFSET + 4)
+#define CAM_CPAS_PATH_DATA_OPE_MAX_OFFSET \
+	(CAM_CPAS_PATH_DATA_OPE_START_OFFSET + 31)
+
 #define CAM_CPAS_PATH_DATA_CONSO_OFFSET 256
 #define CAM_CPAS_PATH_DATA_ALL (CAM_CPAS_PATH_DATA_CONSO_OFFSET + 0)
 
diff --git a/include/linux/coresight.h b/include/linux/coresight.h
index 657a68d..39fad31 100644
--- a/include/linux/coresight.h
+++ b/include/linux/coresight.h
@@ -306,6 +306,8 @@ static inline bool coresight_link_late_disable(void)
 	else
 		return false;
 }
+extern void coresight_disable_all_source_link(void);
+extern void coresight_enable_all_source_link(void);
 #else
 static inline struct coresight_device *
 coresight_register(struct coresight_desc *desc) { return NULL; }
@@ -319,6 +321,8 @@ static inline void coresight_abort(void) {}
 static inline void coresight_disable_reg_clk(struct coresight_device *csdev) {}
 static inline int coresight_enable_reg_clk(struct coresight_device *csdev)
 { return -EINVAL; }
+static inline void coresight_disable_all_source_link(void) {}
+static inline void coresight_enable_all_source_link(void) {}
 #endif
 
 #if defined(CONFIG_OF) && defined(CONFIG_CORESIGHT)
diff --git a/include/linux/input/synaptics_tcm.h b/include/linux/input/synaptics_tcm.h
new file mode 100644
index 0000000..1d650b4
--- /dev/null
+++ b/include/linux/input/synaptics_tcm.h
@@ -0,0 +1,65 @@
+/*
+ * Synaptics TCM touchscreen driver
+ *
+ * Copyright (C) 2017-2019 Synaptics Incorporated. All rights reserved.
+ *
+ * Copyright (C) 2017-2019 Scott Lin <scott.lin@tw.synaptics.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * INFORMATION CONTAINED IN THIS DOCUMENT IS PROVIDED "AS-IS," AND SYNAPTICS
+ * EXPRESSLY DISCLAIMS ALL EXPRESS AND IMPLIED WARRANTIES, INCLUDING ANY
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE,
+ * AND ANY WARRANTIES OF NON-INFRINGEMENT OF ANY INTELLECTUAL PROPERTY RIGHTS.
+ * IN NO EVENT SHALL SYNAPTICS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, PUNITIVE, OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OF THE INFORMATION CONTAINED IN THIS DOCUMENT, HOWEVER CAUSED
+ * AND BASED ON ANY THEORY OF LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHER TORTIOUS ACTION, AND EVEN IF SYNAPTICS WAS ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE. IF A TRIBUNAL OF COMPETENT JURISDICTION DOES
+ * NOT PERMIT THE DISCLAIMER OF DIRECT DAMAGES OR ANY OTHER DAMAGES, SYNAPTICS'
+ * TOTAL CUMULATIVE LIABILITY TO ANY PARTY SHALL NOT EXCEED ONE HUNDRED U.S.
+ * DOLLARS.
+ */
+
+#ifndef _SYNAPTICS_TCM_H_
+#define _SYNAPTICS_TCM_H_
+
+#define I2C_MODULE_NAME "synaptics_tcm_i2c"
+#define SPI_MODULE_NAME "synaptics_tcm_spi"
+
+struct syna_tcm_board_data {
+	bool x_flip;
+	bool y_flip;
+	bool swap_axes;
+	int irq_gpio;
+	int irq_on_state;
+	int power_gpio;
+	int power_on_state;
+	int reset_gpio;
+	int reset_on_state;
+	unsigned int spi_mode;
+	unsigned int power_delay_ms;
+	unsigned int reset_delay_ms;
+	unsigned int reset_active_ms;
+	unsigned int byte_delay_us;
+	unsigned int block_delay_us;
+	unsigned int ubl_i2c_addr;
+	unsigned int ubl_max_freq;
+	unsigned int ubl_byte_delay_us;
+	unsigned long irq_flags;
+	const char *pwr_reg_name;
+	const char *bus_reg_name;
+	const char *fw_name;
+	bool extend_report;
+};
+
+#endif
diff --git a/include/linux/msm_gpi.h b/include/linux/msm_gpi.h
index 09d94f4..11f9cf7 100644
--- a/include/linux/msm_gpi.h
+++ b/include/linux/msm_gpi.h
@@ -27,9 +27,31 @@ enum msm_gpi_tre_type {
 
 #define MSM_GPI_TRE_TYPE(tre) ((tre->dword[3] >> 16) & 0xFF)
 
+/* Lock TRE */
+#define MSM_GPI_LOCK_TRE_DWORD0 (0)
+#define MSM_GPI_LOCK_TRE_DWORD1 (0)
+#define MSM_GPI_LOCK_TRE_DWORD2 (0)
+#define MSM_GPI_LOCK_TRE_DWORD3(link_rx, bei, ieot, ieob, ch) \
+	((0x3 << 20) | (0x0 << 16) | (link_rx << 11) | (bei << 10) | \
+	(ieot << 9) | (ieob << 8) | ch)
+
+/* Unlock TRE */
+#define MSM_GPI_UNLOCK_TRE_DWORD0 (0)
+#define MSM_GPI_UNLOCK_TRE_DWORD1 (0)
+#define MSM_GPI_UNLOCK_TRE_DWORD2 (0)
+#define MSM_GPI_UNLOCK_TRE_DWORD3(link_rx, bei, ieot, ieob, ch) \
+	((0x3 << 20) | (0x1 << 16) | (link_rx << 11) | (bei << 10) | \
+	(ieot << 9) | (ieob << 8) | ch)
+
 /* DMA w. Buffer TRE */
+#ifdef CONFIG_ARM64
 #define MSM_GPI_DMA_W_BUFFER_TRE_DWORD0(ptr) ((u32)ptr)
 #define MSM_GPI_DMA_W_BUFFER_TRE_DWORD1(ptr) ((u32)(ptr >> 32))
+#else
+#define MSM_GPI_DMA_W_BUFFER_TRE_DWORD0(ptr) (ptr)
+#define MSM_GPI_DMA_W_BUFFER_TRE_DWORD1(ptr) 0
+#endif
+
 #define MSM_GPI_DMA_W_BUFFER_TRE_DWORD2(length) (length & 0xFFFFFF)
 #define MSM_GPI_DMA_W_BUFFER_TRE_DWORD3(link_rx, bei, ieot, ieob, ch) \
 	((0x1 << 20) | (0x0 << 16) | (link_rx << 11) | (bei << 10) | \
@@ -50,16 +72,26 @@ enum msm_gpi_tre_type {
 #define MSM_GPI_DMA_IMMEDIATE_TRE_GET_LEN(tre) (tre->dword[2] & 0xF)
 
 /* DMA w. Scatter/Gather List TRE */
+#ifdef CONFIG_ARM64
 #define MSM_GPI_SG_LIST_TRE_DWORD0(ptr) ((u32)ptr)
 #define MSM_GPI_SG_LIST_TRE_DWORD1(ptr) ((u32)(ptr >> 32))
+#else
+#define MSM_GPI_SG_LIST_TRE_DWORD0(ptr) (ptr)
+#define MSM_GPI_SG_LIST_TRE_DWORD1(ptr) 0
+#endif
 #define MSM_GPI_SG_LIST_TRE_DWORD2(length) (length & 0xFFFF)
 #define MSM_GPI_SG_LIST_TRE_DWORD3(link_rx, bei, ieot, ieob, ch) ((0x1 << 20) \
 	| (0x2 << 16) | (link_rx << 11) | (bei << 10) | (ieot << 9) | \
 	(ieob << 8) | ch)
 
 /* SG Element */
+#ifdef CONFIG_ARM64
 #define MSM_GPI_SG_ELEMENT_DWORD0(ptr) ((u32)ptr)
 #define MSM_GPI_SG_ELEMENT_DWORD1(ptr) ((u32)(ptr >> 32))
+#else
+#define MSM_GPI_SG_ELEMENT_DWORD0(ptr) (ptr)
+#define MSM_GPI_SG_ELEMENT_DWORD1(ptr) 0
+#endif
 #define MSM_GSI_SG_ELEMENT_DWORD2(length) (length & 0xFFFFF)
 #define MSM_GSI_SG_ELEMENT_DWORD3 (0)
 
@@ -139,6 +171,12 @@ enum msm_gpi_tre_type {
 	((0x2 << 20) | (0x2 << 16) | (link_rx << 11) | (bei << 10) | \
 	(ieot << 9) | (ieob << 8) | ch)
 
+#ifdef CONFIG_ARM64
+#define MSM_GPI_RING_PHYS_ADDR_UPPER(ptr) ((u32)(ptr >> 32))
+#else
+#define MSM_GPI_RING_PHYS_ADDR_UPPER(ptr) 0
+#endif
+
 /* cmds to perform by using dmaengine_slave_config() */
 enum msm_gpi_ctrl_cmd {
 	MSM_GPI_INIT,
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 7d4fea4..3949bcf 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -120,6 +120,18 @@ enum task_boost_type {
 	TASK_BOOST_END,
 };
 
+/*
+ * Enum for display driver to provide varying refresh rates
+ */
+enum fps {
+	FPS0 = 0,
+	FPS30 = 30,
+	FPS48 = 48,
+	FPS60 = 60,
+	FPS90 = 90,
+	FPS120 = 120,
+};
+
 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
 
 /*
@@ -557,8 +569,9 @@ extern void sched_update_cpu_freq_min_max(const cpumask_t *cpus, u32 fmin,
 					  u32 fmax);
 extern int sched_set_boost(int enable);
 extern void free_task_load_ptrs(struct task_struct *p);
+extern void sched_set_refresh_rate(enum fps fps);
 
-#define RAVG_HIST_SIZE_MAX  5
+#define RAVG_HIST_SIZE_MAX 5
 #define NUM_BUSY_BUCKETS 10
 
 /* ravg represents frequency scaled cpu-demand of tasks */
@@ -602,11 +615,12 @@ struct ravg {
 	u32 sum_history[RAVG_HIST_SIZE_MAX];
 	u32 *curr_window_cpu, *prev_window_cpu;
 	u32 curr_window, prev_window;
-	u16 active_windows;
 	u32 pred_demand;
 	u8 busy_buckets[NUM_BUSY_BUCKETS];
 	u16 demand_scaled;
 	u16 pred_demand_scaled;
+	u64 active_time;
+	u64 last_win_size;
 };
 #else
 static inline void sched_exit(struct task_struct *p) { }
@@ -625,6 +639,8 @@ static inline void free_task_load_ptrs(struct task_struct *p) { }
 
 static inline void sched_update_cpu_freq_min_max(const cpumask_t *cpus,
 					u32 fmin, u32 fmax) { }
+
+static inline void sched_set_refresh_rate(enum fps fps) { }
 #endif /* CONFIG_SCHED_WALT */
 
 struct sched_rt_entity {
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
index 7f7297e..817cbd3 100644
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -31,9 +31,9 @@ extern unsigned int sysctl_sched_sync_hint_enable;
 extern unsigned int sysctl_sched_cstate_aware;
 extern unsigned int sysctl_sched_wakeup_granularity;
 extern unsigned int sysctl_sched_child_runs_first;
+#ifdef CONFIG_SCHED_WALT
 extern unsigned int sysctl_sched_capacity_margin_up[MAX_MARGIN_LEVELS];
 extern unsigned int sysctl_sched_capacity_margin_down[MAX_MARGIN_LEVELS];
-#ifdef CONFIG_SCHED_WALT
 extern unsigned int sysctl_sched_user_hint;
 extern const int sched_user_hint_max;
 extern unsigned int sysctl_sched_cpu_high_irqload;
@@ -54,6 +54,7 @@ extern unsigned int sysctl_sched_coloc_busy_hyst_enable_cpus;
 extern unsigned int sysctl_sched_coloc_busy_hyst;
 extern unsigned int sysctl_sched_coloc_busy_hyst_max_ms;
 extern unsigned int sysctl_sched_window_stats_policy;
+extern unsigned int sysctl_sched_ravg_window_nr_ticks;
 
 extern int
 walt_proc_group_thresholds_handler(struct ctl_table *table, int write,
@@ -64,6 +65,14 @@ walt_proc_user_hint_handler(struct ctl_table *table, int write,
 			 void __user *buffer, size_t *lenp,
 			 loff_t *ppos);
 
+extern int
+sched_ravg_window_handler(struct ctl_table *table, int write,
+			 void __user *buffer, size_t *lenp,
+			 loff_t *ppos);
+
+extern int sched_updown_migrate_handler(struct ctl_table *table,
+					int write, void __user *buffer,
+					size_t *lenp, loff_t *ppos);
 #endif
 
 #if defined(CONFIG_PREEMPT_TRACER) || defined(CONFIG_DEBUG_PREEMPT)
@@ -125,10 +134,6 @@ extern int sched_rt_handler(struct ctl_table *table, int write,
 		void __user *buffer, size_t *lenp,
 		loff_t *ppos);
 
-extern int sched_updown_migrate_handler(struct ctl_table *table,
-					int write, void __user *buffer,
-					size_t *lenp, loff_t *ppos);
-
 extern int sysctl_numa_balancing(struct ctl_table *table, int write,
 				 void __user *buffer, size_t *lenp,
 				 loff_t *ppos);
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index 701fef3..03444cd 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -67,6 +67,10 @@ extern int proc_douintvec_capacity(struct ctl_table *table, int write,
 				   void __user *buffer, size_t *lenp,
 				   loff_t *ppos);
 
+extern int proc_douintvec_ravg_window(struct ctl_table *table, int write,
+				      void __user *buffer, size_t *lenp,
+				      loff_t *ppos);
+
 /*
  * Register a set of sysctl names by calling register_sysctl_table
  * with an initialised array of struct ctl_table's.  An entry with 
diff --git a/include/soc/qcom/socinfo.h b/include/soc/qcom/socinfo.h
index 192185a..0dec7c5 100644
--- a/include/soc/qcom/socinfo.h
+++ b/include/soc/qcom/socinfo.h
@@ -58,6 +58,8 @@
 	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,lito")
 #define early_machine_is_bengal()	\
 	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,bengal")
+#define early_machine_is_lagoon()	\
+	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,lagoon")
 #define early_machine_is_sdmshrike()	\
 	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,sdmshrike")
 #define early_machine_is_sm6150()	\
@@ -91,6 +93,7 @@
 #define early_machine_is_kona()		0
 #define early_machine_is_lito()		0
 #define early_machine_is_bengal()	0
+#define early_machine_is_lagoon()	0
 #define early_machine_is_sdmshrike()	0
 #define early_machine_is_sm6150()	0
 #define early_machine_is_qcs405()	0
@@ -121,6 +124,7 @@ enum msm_cpu {
 	MSM_CPU_KONA,
 	MSM_CPU_LITO,
 	MSM_CPU_BENGAL,
+	MSM_CPU_LAGOON,
 	MSM_CPU_SDMSHRIKE,
 	MSM_CPU_SM6150,
 	MSM_CPU_QCS405,
diff --git a/include/trace/events/walt.h b/include/trace/events/walt.h
index 10fecce..14c15ac 100644
--- a/include/trace/events/walt.h
+++ b/include/trace/events/walt.h
@@ -238,7 +238,7 @@ TRACE_EVENT(sched_update_task_ravg,
 		__dynamic_array(u32,		prev_sum, nr_cpu_ids)
 		__field(u64,			nt_cs)
 		__field(u64,			nt_ps)
-		__field(u32,			active_windows)
+		__field(u64,			active_time)
 		__field(u32,			curr_top)
 		__field(u32,			prev_top)
 	),
@@ -276,12 +276,12 @@ TRACE_EVENT(sched_update_task_ravg,
 						p->ravg.prev_window_cpu);
 		__entry->nt_cs		= rq->nt_curr_runnable_sum;
 		__entry->nt_ps		= rq->nt_prev_runnable_sum;
-		__entry->active_windows	= p->ravg.active_windows;
+		__entry->active_time	= p->ravg.active_time;
 		__entry->curr_top	= rq->curr_top;
 		__entry->prev_top	= rq->prev_top;
 	),
 
-	TP_printk("wc %llu ws %llu delta %llu event %s cpu %d cur_freq %u cur_pid %d task %d (%s) ms %llu delta %llu demand %u coloc_demand: %u sum %u irqtime %llu pred_demand %u rq_cs %llu rq_ps %llu cur_window %u (%s) prev_window %u (%s) nt_cs %llu nt_ps %llu active_wins %u grp_cs %lld grp_ps %lld, grp_nt_cs %llu, grp_nt_ps: %llu curr_top %u prev_top %u",
+	TP_printk("wc %llu ws %llu delta %llu event %s cpu %d cur_freq %u cur_pid %d task %d (%s) ms %llu delta %llu demand %u coloc_demand: %u sum %u irqtime %llu pred_demand %u rq_cs %llu rq_ps %llu cur_window %u (%s) prev_window %u (%s) nt_cs %llu nt_ps %llu active_time %llu grp_cs %lld grp_ps %lld, grp_nt_cs %llu, grp_nt_ps: %llu curr_top %u prev_top %u",
 		__entry->wallclock, __entry->win_start, __entry->delta,
 		task_event_names[__entry->evt], __entry->cpu,
 		__entry->cur_freq, __entry->cur_pid,
@@ -293,7 +293,7 @@ TRACE_EVENT(sched_update_task_ravg,
 		__entry->prev_window,
 		__window_print(p, __get_dynamic_array(prev_sum), nr_cpu_ids),
 		__entry->nt_cs, __entry->nt_ps,
-		__entry->active_windows, __entry->grp_cs,
+		__entry->active_time, __entry->grp_cs,
 		__entry->grp_ps, __entry->grp_nt_cs, __entry->grp_nt_ps,
 		__entry->curr_top, __entry->prev_top)
 );
diff --git a/include/uapi/linux/msm_ipa.h b/include/uapi/linux/msm_ipa.h
index 3dcbefe..a990fda 100644
--- a/include/uapi/linux/msm_ipa.h
+++ b/include/uapi/linux/msm_ipa.h
@@ -211,6 +211,8 @@
 #define IPA_FLT_L2TP_INNER_IPV4_DST_ADDR (1ul << 26)
 #define IPA_FLT_IS_PURE_ACK		(1ul << 27)
 #define IPA_FLT_VLAN_ID			(1ul << 28)
+#define IPA_FLT_MAC_SRC_ADDR_802_1Q	(1ul << 29)
+#define IPA_FLT_MAC_DST_ADDR_802_1Q	(1ul << 30)
 
 /**
  * maximal number of NAT PDNs in the PDN config table
diff --git a/include/uapi/misc/wigig_sensing_uapi.h b/include/uapi/misc/wigig_sensing_uapi.h
index 4fb8221..b9b867e 100644
--- a/include/uapi/misc/wigig_sensing_uapi.h
+++ b/include/uapi/misc/wigig_sensing_uapi.h
@@ -27,8 +27,10 @@ struct wigig_sensing_change_mode {
 };
 
 enum wigig_sensing_event {
+	WIGIG_SENSING_EVENT_MIN,
 	WIGIG_SENSING_EVENT_FW_READY,
 	WIGIG_SENSING_EVENT_RESET,
+	WIGIG_SENSING_EVENT_MAX,
 };
 
 #define WIGIG_SENSING_IOC_MAGIC	'r'
@@ -39,6 +41,7 @@ enum wigig_sensing_event {
 #define WIGIG_SENSING_IOCTL_CLEAR_DATA             (3)
 #define WIGIG_SENSING_IOCTL_GET_NUM_DROPPED_BURSTS (4)
 #define WIGIG_SENSING_IOCTL_GET_EVENT              (5)
+#define WIGIG_SENSING_IOCTL_GET_NUM_AVAIL_BURSTS   (6)
 
 /**
  * Set auto recovery, which means that the system will go back to search mode
@@ -76,17 +79,24 @@ enum wigig_sensing_event {
 	_IO(WIGIG_SENSING_IOC_MAGIC, WIGIG_SENSING_IOCTL_CLEAR_DATA)
 
 /**
- * Get number of bursts that where dropped due to data buffer overflow
+ * Get number of bursts that were dropped due to data buffer overflow
  */
 #define WIGIG_SENSING_IOC_GET_NUM_DROPPED_BURSTS \
 	_IOR(WIGIG_SENSING_IOC_MAGIC, \
-	     WIGIG_SENSING_IOCTL_GET_NUM_DROPPED_BURSTS, uint32_t)
+	     WIGIG_SENSING_IOCTL_GET_NUM_DROPPED_BURSTS, sizeof(uint32_t))
 
 /**
- * Get number of bursts that where dropped due to data buffer overflow
+ * Get asynchronous event (FW_READY, RESET)
  */
 #define WIGIG_SENSING_IOC_GET_EVENT \
 	_IOR(WIGIG_SENSING_IOC_MAGIC, WIGIG_SENSING_IOCTL_GET_EVENT, \
 	     sizeof(enum wigig_sensing_event))
 
-#endif /* ____WIGIG_SENSING_UAPI_H__ */
+/**
+ * Get number of available bursts in the data buffer
+ */
+#define WIGIG_SENSING_IOC_GET_NUM_AVAIL_BURSTS \
+	_IOR(WIGIG_SENSING_IOC_MAGIC, WIGIG_SENSING_IOCTL_GET_NUM_AVAIL_BURSTS,\
+	     sizeof(uint32_t))
+
+#endif /* __WIGIG_SENSING_UAPI_H__ */
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 1099008..ee38197 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -6875,155 +6875,6 @@ void ia64_set_curr_task(int cpu, struct task_struct *p)
 {
 	cpu_curr(cpu) = p;
 }
-
-#endif
-
-#ifdef CONFIG_PROC_SYSCTL
-static int find_capacity_margin_levels(void)
-{
-	int cpu, max_clusters;
-
-	for (cpu = max_clusters = 0; cpu < num_possible_cpus();) {
-		cpu += cpumask_weight(topology_possible_sibling_cpumask(cpu));
-		max_clusters++;
-	}
-
-	/*
-	 * Capacity margin levels is number of clusters available in
-	 * the system subtracted by 1.
-	 */
-	return max_clusters - 1;
-}
-
-static void sched_update_up_migrate_values(int cap_margin_levels,
-				const struct cpumask *cluster_cpus[])
-{
-	int i, cpu;
-
-	if (cap_margin_levels > 1) {
-		/*
-		 * No need to worry about CPUs in last cluster
-		 * if there are more than 2 clusters in the system
-		 */
-		for (i = 0; i < cap_margin_levels; i++)
-			if (cluster_cpus[i])
-				for_each_cpu(cpu, cluster_cpus[i])
-					sched_capacity_margin_up[cpu] =
-					sysctl_sched_capacity_margin_up[i];
-	} else {
-		for_each_possible_cpu(cpu)
-			sched_capacity_margin_up[cpu] =
-				sysctl_sched_capacity_margin_up[0];
-	}
-}
-
-static void sched_update_down_migrate_values(int cap_margin_levels,
-				const struct cpumask *cluster_cpus[])
-{
-	int i, cpu;
-
-	if (cap_margin_levels > 1) {
-		/*
-		 * Skip last cluster as down migrate value isn't needed.
-		 * Because there is no downmigration to it.
-		 */
-		for (i = 0; i < cap_margin_levels; i++)
-			if (cluster_cpus[i])
-				for_each_cpu(cpu, cluster_cpus[i])
-					sched_capacity_margin_down[cpu] =
-					sysctl_sched_capacity_margin_down[i];
-	} else {
-		for_each_possible_cpu(cpu)
-			sched_capacity_margin_down[cpu] =
-				sysctl_sched_capacity_margin_down[0];
-	}
-}
-
-static void sched_update_updown_migrate_values(unsigned int *data,
-					      int cap_margin_levels)
-{
-	int i, cpu;
-	static const struct cpumask *cluster_cpus[MAX_CLUSTERS];
-
-	for (i = cpu = 0; i < MAX_CLUSTERS &&
-				cpu < num_possible_cpus(); i++) {
-		cluster_cpus[i] = topology_possible_sibling_cpumask(cpu);
-		cpu += cpumask_weight(topology_possible_sibling_cpumask(cpu));
-	}
-
-	if (data == &sysctl_sched_capacity_margin_up[0])
-		sched_update_up_migrate_values(cap_margin_levels, cluster_cpus);
-	else
-		sched_update_down_migrate_values(cap_margin_levels,
-						 cluster_cpus);
-}
-
-int sched_updown_migrate_handler(struct ctl_table *table, int write,
-				void __user *buffer, size_t *lenp,
-				loff_t *ppos)
-{
-	int ret, i;
-	unsigned int *data = (unsigned int *)table->data;
-	unsigned int *old_val;
-	static DEFINE_MUTEX(mutex);
-	static int cap_margin_levels = -1;
-
-	mutex_lock(&mutex);
-
-	if (cap_margin_levels == -1 ||
-		table->maxlen != (sizeof(unsigned int) * cap_margin_levels)) {
-		cap_margin_levels = find_capacity_margin_levels();
-		table->maxlen = sizeof(unsigned int) * cap_margin_levels;
-	}
-
-	if (cap_margin_levels <= 0) {
-		ret = -EINVAL;
-		goto unlock_mutex;
-	}
-
-	if (!write) {
-		ret = proc_douintvec_capacity(table, write, buffer, lenp, ppos);
-		goto unlock_mutex;
-	}
-
-	/*
-	 * Cache the old values so that they can be restored
-	 * if either the write fails (for example out of range values)
-	 * or the downmigrate and upmigrate are not in sync.
-	 */
-	old_val = kzalloc(table->maxlen, GFP_KERNEL);
-	if (!old_val) {
-		ret = -ENOMEM;
-		goto unlock_mutex;
-	}
-
-	memcpy(old_val, data, table->maxlen);
-
-	ret = proc_douintvec_capacity(table, write, buffer, lenp, ppos);
-
-	if (ret) {
-		memcpy(data, old_val, table->maxlen);
-		goto free_old_val;
-	}
-
-	for (i = 0; i < cap_margin_levels; i++) {
-		if (sysctl_sched_capacity_margin_up[i] >
-				sysctl_sched_capacity_margin_down[i]) {
-			memcpy(data, old_val, table->maxlen);
-			ret = -EINVAL;
-			goto free_old_val;
-		}
-	}
-
-	sched_update_updown_migrate_values(data, cap_margin_levels);
-
-free_old_val:
-	kfree(old_val);
-unlock_mutex:
-	mutex_unlock(&mutex);
-
-	return ret;
-}
 #endif
 
 #ifdef CONFIG_CGROUP_SCHED
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index d62b9f5..785c211 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -165,12 +165,6 @@ unsigned int sysctl_sched_cfs_bandwidth_slice		= 5000UL;
  * (default: ~20%)
  */
 unsigned int capacity_margin				= 1280;
-
-/* Migration margins */
-unsigned int sysctl_sched_capacity_margin_up[MAX_MARGIN_LEVELS] = {
-			[0 ... MAX_MARGIN_LEVELS-1] = 1078}; /* ~5% margin */
-unsigned int sysctl_sched_capacity_margin_down[MAX_MARGIN_LEVELS] = {
-			[0 ... MAX_MARGIN_LEVELS-1] = 1205}; /* ~15% margin */
 unsigned int sched_capacity_margin_up[NR_CPUS] = {
 			[0 ... NR_CPUS-1] = 1078}; /* ~5% margin */
 unsigned int sched_capacity_margin_down[NR_CPUS] = {
@@ -6932,7 +6926,8 @@ static void find_best_target(struct sched_domain *sd, cpumask_t *cpus,
 		goto out;
 
 	/* fast path for prev_cpu */
-	if ((capacity_orig_of(prev_cpu) == capacity_orig_of(start_cpu)) &&
+	if (((capacity_orig_of(prev_cpu) == capacity_orig_of(start_cpu)) ||
+		asym_cap_siblings(prev_cpu, start_cpu)) &&
 		!cpu_isolated(prev_cpu) && cpu_online(prev_cpu) &&
 		idle_cpu(prev_cpu)) {
 
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 47f9add..9b1b472 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -991,6 +991,7 @@ struct rq {
 	struct walt_sched_stats walt_stats;
 
 	u64			window_start;
+	u32			prev_window_size;
 	unsigned long		walt_flags;
 
 	u64			cur_irqload;
@@ -2860,8 +2861,6 @@ static inline void restore_cgroup_boost_settings(void) { }
 
 extern int alloc_related_thread_groups(void);
 
-extern unsigned long all_cluster_ids[];
-
 extern void check_for_migration(struct rq *rq, struct task_struct *p);
 
 static inline int is_reserved(int cpu)
@@ -2943,11 +2942,7 @@ static inline enum sched_boost_policy task_boost_policy(struct task_struct *p)
 
 static inline bool is_min_capacity_cluster(struct sched_cluster *cluster)
 {
-	int cpu = cluster_first_cpu(cluster);
-
-	if (cpu >= num_possible_cpus())
-		return false;
-	return is_min_capacity_cpu(cpu);
+	return is_min_capacity_cpu(cluster_first_cpu(cluster));
 }
 
 #else	/* CONFIG_SCHED_WALT */
diff --git a/kernel/sched/walt.c b/kernel/sched/walt.c
index ea9166f..9ae415a 100644
--- a/kernel/sched/walt.c
+++ b/kernel/sched/walt.c
@@ -108,7 +108,9 @@ static void release_rq_locks_irqrestore(const cpumask_t *cpus,
 /* Max window size (in ns) = 1s */
 #define MAX_SCHED_RAVG_WINDOW 1000000000
 
-__read_mostly unsigned int sysctl_sched_cpu_high_irqload = (10 * NSEC_PER_MSEC);
+#define NR_WINDOWS_PER_SEC (NSEC_PER_SEC / MIN_SCHED_RAVG_WINDOW)
+
+__read_mostly unsigned int sysctl_sched_cpu_high_irqload = TICK_NSEC;
 
 unsigned int sysctl_sched_walt_rotate_big_tasks;
 unsigned int walt_rotation_enabled;
@@ -121,9 +123,14 @@ static __read_mostly unsigned int sched_io_is_busy = 1;
 __read_mostly unsigned int sysctl_sched_window_stats_policy =
 	WINDOW_STATS_MAX_RECENT_AVG;
 
+__read_mostly unsigned int sysctl_sched_ravg_window_nr_ticks =
+	(HZ / NR_WINDOWS_PER_SEC);
+
 /* Window size (in ns) */
 __read_mostly unsigned int sched_ravg_window = MIN_SCHED_RAVG_WINDOW;
+__read_mostly unsigned int new_sched_ravg_window = MIN_SCHED_RAVG_WINDOW;
 
+u64 sched_ravg_window_change_time;
 /*
  * A after-boot constant divisor for cpu_util_freq_walt() to apply the load
  * boost.
@@ -286,7 +293,11 @@ update_window_start(struct rq *rq, u64 wallclock, int event)
 	u64 old_window_start = rq->window_start;
 
 	delta = wallclock - rq->window_start;
-	BUG_ON(delta < 0);
+	if (delta < 0) {
+		printk_deferred("WALT-BUG CPU%d; wallclock=%llu is lesser than window_start=%llu",
+			rq->cpu, wallclock, rq->window_start);
+		SCHED_BUG_ON(1);
+	}
 	if (delta < sched_ravg_window)
 		return old_window_start;
 
@@ -295,6 +306,7 @@ update_window_start(struct rq *rq, u64 wallclock, int event)
 
 	rq->cum_window_demand_scaled =
 			rq->walt_stats.cumulative_runnable_avg_scaled;
+	rq->prev_window_size = sched_ravg_window;
 
 	return old_window_start;
 }
@@ -638,10 +650,10 @@ static inline void account_load_subtractions(struct rq *rq)
 		ls[i].new_subs = 0;
 	}
 
-	BUG_ON((s64)rq->prev_runnable_sum < 0);
-	BUG_ON((s64)rq->curr_runnable_sum < 0);
-	BUG_ON((s64)rq->nt_prev_runnable_sum < 0);
-	BUG_ON((s64)rq->nt_curr_runnable_sum < 0);
+	SCHED_BUG_ON((s64)rq->prev_runnable_sum < 0);
+	SCHED_BUG_ON((s64)rq->curr_runnable_sum < 0);
+	SCHED_BUG_ON((s64)rq->nt_prev_runnable_sum < 0);
+	SCHED_BUG_ON((s64)rq->nt_curr_runnable_sum < 0);
 }
 
 static inline void create_subtraction_entry(struct rq *rq, u64 ws, int index)
@@ -739,15 +751,50 @@ static inline void inter_cluster_migration_fixup
 	dest_rq->curr_runnable_sum += p->ravg.curr_window;
 	dest_rq->prev_runnable_sum += p->ravg.prev_window;
 
-	src_rq->curr_runnable_sum -=  p->ravg.curr_window_cpu[task_cpu];
-	src_rq->prev_runnable_sum -=  p->ravg.prev_window_cpu[task_cpu];
+	if (src_rq->curr_runnable_sum < p->ravg.curr_window_cpu[task_cpu]) {
+		printk_deferred("WALT-BUG pid=%u CPU%d -> CPU%d src_crs=%llu is lesser than task_contrib=%u",
+			p->pid, src_rq->cpu, dest_rq->cpu,
+			src_rq->curr_runnable_sum,
+			p->ravg.curr_window_cpu[task_cpu]);
+		walt_task_dump(p);
+		SCHED_BUG_ON(1);
+	}
+	src_rq->curr_runnable_sum -= p->ravg.curr_window_cpu[task_cpu];
+
+	if (src_rq->prev_runnable_sum < p->ravg.prev_window_cpu[task_cpu]) {
+		printk_deferred("WALT-BUG pid=%u CPU%d -> CPU%d src_prs=%llu is lesser than task_contrib=%u",
+			p->pid, src_rq->cpu, dest_rq->cpu,
+			src_rq->prev_runnable_sum,
+			p->ravg.prev_window_cpu[task_cpu]);
+		walt_task_dump(p);
+		SCHED_BUG_ON(1);
+	}
+	src_rq->prev_runnable_sum -= p->ravg.prev_window_cpu[task_cpu];
 
 	if (new_task) {
 		dest_rq->nt_curr_runnable_sum += p->ravg.curr_window;
 		dest_rq->nt_prev_runnable_sum += p->ravg.prev_window;
 
+		if (src_rq->nt_curr_runnable_sum <
+				p->ravg.curr_window_cpu[task_cpu]) {
+			printk_deferred("WALT-BUG pid=%u CPU%d -> CPU%d src_nt_crs=%llu is lesser than task_contrib=%u",
+				p->pid, src_rq->cpu, dest_rq->cpu,
+				src_rq->nt_curr_runnable_sum,
+				p->ravg.curr_window_cpu[task_cpu]);
+			walt_task_dump(p);
+			SCHED_BUG_ON(1);
+		}
 		src_rq->nt_curr_runnable_sum -=
 				p->ravg.curr_window_cpu[task_cpu];
+		if (src_rq->nt_prev_runnable_sum <
+				p->ravg.prev_window_cpu[task_cpu]) {
+			printk_deferred("WALT-BUG pid=%u CPU%d -> CPU%d src_nt_prs=%llu is lesser than task_contrib=%u",
+				p->pid, src_rq->cpu, dest_rq->cpu,
+				src_rq->nt_prev_runnable_sum,
+				p->ravg.prev_window_cpu[task_cpu]);
+			walt_task_dump(p);
+			SCHED_BUG_ON(1);
+		}
 		src_rq->nt_prev_runnable_sum -=
 				p->ravg.prev_window_cpu[task_cpu];
 	}
@@ -757,11 +804,6 @@ static inline void inter_cluster_migration_fixup
 
 	update_cluster_load_subtractions(p, task_cpu,
 			src_rq->window_start, new_task);
-
-	BUG_ON((s64)src_rq->prev_runnable_sum < 0);
-	BUG_ON((s64)src_rq->curr_runnable_sum < 0);
-	BUG_ON((s64)src_rq->nt_prev_runnable_sum < 0);
-	BUG_ON((s64)src_rq->nt_curr_runnable_sum < 0);
 }
 
 static u32 load_to_index(u32 load)
@@ -1333,6 +1375,9 @@ static void rollover_task_window(struct task_struct *p, bool full_window)
 		p->ravg.prev_window_cpu[i] = curr_cpu_windows[i];
 		p->ravg.curr_window_cpu[i] = 0;
 	}
+
+	if (p->ravg.active_time < NEW_TASK_ACTIVE_TIME)
+		p->ravg.active_time += p->ravg.last_win_size;
 }
 
 void sched_set_io_is_busy(int val)
@@ -1455,7 +1500,7 @@ static void update_cpu_busy_time(struct task_struct *p, struct rq *rq,
 	int p_is_curr_task = (p == rq->curr);
 	u64 mark_start = p->ravg.mark_start;
 	u64 window_start = rq->window_start;
-	u32 window_size = sched_ravg_window;
+	u32 window_size = rq->prev_window_size;
 	u64 delta;
 	u64 *curr_runnable_sum = &rq->curr_runnable_sum;
 	u64 *prev_runnable_sum = &rq->prev_runnable_sum;
@@ -1467,11 +1512,8 @@ static void update_cpu_busy_time(struct task_struct *p, struct rq *rq,
 	u32 old_curr_window = p->ravg.curr_window;
 
 	new_window = mark_start < window_start;
-	if (new_window) {
+	if (new_window)
 		full_window = (window_start - mark_start) >= window_size;
-		if (p->ravg.active_windows < USHRT_MAX)
-			p->ravg.active_windows++;
-	}
 
 	new_task = is_new_task(p);
 
@@ -1657,7 +1699,7 @@ static void update_cpu_busy_time(struct task_struct *p, struct rq *rq,
 		 * started at wallclock - irqtime.
 		 */
 
-		BUG_ON(!is_idle_task(p));
+		SCHED_BUG_ON(!is_idle_task(p));
 		mark_start = wallclock - irqtime;
 
 		/*
@@ -2011,7 +2053,7 @@ update_task_rq_cpu_cycles(struct task_struct *p, struct rq *rq, int event,
 			rq->cc.time = irqtime;
 		else
 			rq->cc.time = wallclock - p->ravg.mark_start;
-		BUG_ON((s64)rq->cc.time < 0);
+		SCHED_BUG_ON((s64)rq->cc.time < 0);
 	}
 
 	p->cpu_cycles = cur_cycles;
@@ -2067,6 +2109,7 @@ void update_task_ravg(struct task_struct *p, struct rq *rq, int event,
 
 done:
 	p->ravg.mark_start = wallclock;
+	p->ravg.last_win_size = sched_ravg_window;
 
 	run_walt_irq_work(old_window_start, rq);
 }
@@ -2207,14 +2250,36 @@ static void walt_cpus_capacity_changed(const cpumask_t *cpus)
 }
 
 
-static cpumask_t all_cluster_cpus = CPU_MASK_NONE;
-DECLARE_BITMAP(all_cluster_ids, NR_CPUS);
 struct sched_cluster *sched_cluster[NR_CPUS];
-int num_clusters;
+static int num_sched_clusters;
 
 struct list_head cluster_head;
 cpumask_t asym_cap_sibling_cpus = CPU_MASK_NONE;
 
+static struct sched_cluster init_cluster = {
+	.list			=	LIST_HEAD_INIT(init_cluster.list),
+	.id			=	0,
+	.max_power_cost		=	1,
+	.min_power_cost		=	1,
+	.max_possible_capacity	=	1024,
+	.efficiency		=	1,
+	.cur_freq		=	1,
+	.max_freq		=	1,
+	.max_mitigated_freq	=	UINT_MAX,
+	.min_freq		=	1,
+	.max_possible_freq	=	1,
+	.exec_scale_factor	=	1024,
+	.aggr_grp_load		=	0,
+};
+
+void init_clusters(void)
+{
+	init_cluster.cpus = *cpu_possible_mask;
+	raw_spin_lock_init(&init_cluster.load_lock);
+	INIT_LIST_HEAD(&cluster_head);
+	list_add(&init_cluster.list, &cluster_head);
+}
+
 static void
 insert_cluster(struct sched_cluster *cluster, struct list_head *head)
 {
@@ -2276,8 +2341,22 @@ static void add_cluster(const struct cpumask *cpus, struct list_head *head)
 		cpu_rq(i)->cluster = cluster;
 
 	insert_cluster(cluster, head);
-	set_bit(num_clusters, all_cluster_ids);
-	num_clusters++;
+	num_sched_clusters++;
+}
+
+static void cleanup_clusters(struct list_head *head)
+{
+	struct sched_cluster *cluster, *tmp;
+	int i;
+
+	list_for_each_entry_safe(cluster, tmp, head, list) {
+		for_each_cpu(i, &cluster->cpus)
+			cpu_rq(i)->cluster = &init_cluster;
+
+		list_del(&cluster->list);
+		num_sched_clusters--;
+		kfree(cluster);
+	}
 }
 
 static int compute_max_possible_capacity(struct sched_cluster *cluster)
@@ -2393,7 +2472,11 @@ void update_cluster_topology(void)
 
 	for_each_cpu(i, &cpus) {
 		cluster_cpus = topology_possible_sibling_cpumask(i);
-		cpumask_or(&all_cluster_cpus, &all_cluster_cpus, cluster_cpus);
+		if (cpumask_empty(cluster_cpus)) {
+			WARN(1, "WALT: Invalid cpu topology!!");
+			cleanup_clusters(&new_head);
+			return;
+		}
 		cpumask_andnot(&cpus, &cpus, cluster_cpus);
 		add_cluster(cluster_cpus, &new_head);
 	}
@@ -2417,30 +2500,6 @@ void update_cluster_topology(void)
 		cpumask_clear(&asym_cap_sibling_cpus);
 }
 
-struct sched_cluster init_cluster = {
-	.list			=	LIST_HEAD_INIT(init_cluster.list),
-	.id			=	0,
-	.max_power_cost		=	1,
-	.min_power_cost		=	1,
-	.max_possible_capacity	=	1024,
-	.efficiency		=	1,
-	.cur_freq		=	1,
-	.max_freq		=	1,
-	.max_mitigated_freq	=	UINT_MAX,
-	.min_freq		=	1,
-	.max_possible_freq	=	1,
-	.exec_scale_factor	=	1024,
-	.aggr_grp_load		=	0,
-};
-
-void init_clusters(void)
-{
-	bitmap_clear(all_cluster_ids, 0, NR_CPUS);
-	init_cluster.cpus = *cpu_possible_mask;
-	raw_spin_lock_init(&init_cluster.load_lock);
-	INIT_LIST_HEAD(&cluster_head);
-}
-
 static unsigned long cpu_max_table_freq[NR_CPUS];
 
 static int cpufreq_notifier_policy(struct notifier_block *nb,
@@ -2459,8 +2518,8 @@ static int cpufreq_notifier_policy(struct notifier_block *nb,
 	if (min_max_freq == 1)
 		min_max_freq = UINT_MAX;
 	min_max_freq = min(min_max_freq, policy->cpuinfo.max_freq);
-	BUG_ON(!min_max_freq);
-	BUG_ON(!policy->max);
+	SCHED_BUG_ON(!min_max_freq);
+	SCHED_BUG_ON(!policy->max);
 
 	for_each_cpu(i, &policy_cluster)
 		cpu_max_table_freq[i] = policy->cpuinfo.max_freq;
@@ -3131,11 +3190,46 @@ static void transfer_busy_time(struct rq *rq, struct related_thread_group *grp,
 		src_nt_prev_runnable_sum = &rq->nt_prev_runnable_sum;
 		dst_nt_prev_runnable_sum = &cpu_time->nt_prev_runnable_sum;
 
+		if (*src_curr_runnable_sum < p->ravg.curr_window_cpu[cpu]) {
+			printk_deferred("WALT-BUG pid=%u CPU=%d event=%d src_crs=%llu is lesser than task_contrib=%u",
+				p->pid, cpu, event, *src_curr_runnable_sum,
+				p->ravg.curr_window_cpu[cpu]);
+			walt_task_dump(p);
+			SCHED_BUG_ON(1);
+		}
 		*src_curr_runnable_sum -= p->ravg.curr_window_cpu[cpu];
+
+		if (*src_prev_runnable_sum < p->ravg.prev_window_cpu[cpu]) {
+			printk_deferred("WALT-BUG pid=%u CPU=%d event=%d src_prs=%llu is lesser than task_contrib=%u",
+				p->pid, cpu, event, *src_prev_runnable_sum,
+				p->ravg.prev_window_cpu[cpu]);
+			walt_task_dump(p);
+			SCHED_BUG_ON(1);
+		}
 		*src_prev_runnable_sum -= p->ravg.prev_window_cpu[cpu];
+
 		if (new_task) {
+			if (*src_nt_curr_runnable_sum <
+					p->ravg.curr_window_cpu[cpu]) {
+				printk_deferred("WALT-BUG pid=%u CPU=%d event=%d src_nt_crs=%llu is lesser than task_contrib=%u",
+					p->pid, cpu, event,
+					*src_nt_curr_runnable_sum,
+					p->ravg.curr_window_cpu[cpu]);
+				walt_task_dump(p);
+				SCHED_BUG_ON(1);
+			}
 			*src_nt_curr_runnable_sum -=
 					p->ravg.curr_window_cpu[cpu];
+
+			if (*src_nt_prev_runnable_sum <
+					p->ravg.prev_window_cpu[cpu]) {
+				printk_deferred("WALT-BUG pid=%u CPU=%d event=%d src_nt_prs=%llu is lesser than task_contrib=%u",
+					p->pid, cpu, event,
+					*src_nt_prev_runnable_sum,
+					p->ravg.prev_window_cpu[cpu]);
+				walt_task_dump(p);
+				SCHED_BUG_ON(1);
+			}
 			*src_nt_prev_runnable_sum -=
 					p->ravg.prev_window_cpu[cpu];
 		}
@@ -3156,10 +3250,43 @@ static void transfer_busy_time(struct rq *rq, struct related_thread_group *grp,
 		src_nt_prev_runnable_sum = &cpu_time->nt_prev_runnable_sum;
 		dst_nt_prev_runnable_sum = &rq->nt_prev_runnable_sum;
 
+		if (*src_curr_runnable_sum < p->ravg.curr_window) {
+			printk_deferred("WALT-BUG pid=%u CPU=%d event=%d src_crs=%llu is lesser than task_contrib=%u",
+				p->pid, cpu, event, *src_curr_runnable_sum,
+				p->ravg.curr_window);
+			walt_task_dump(p);
+			SCHED_BUG_ON(1);
+		}
 		*src_curr_runnable_sum -= p->ravg.curr_window;
+
+		if (*src_prev_runnable_sum < p->ravg.prev_window) {
+			printk_deferred("WALT-BUG pid=%u CPU=%d event=%d src_prs=%llu is lesser than task_contrib=%u",
+				p->pid, cpu, event, *src_prev_runnable_sum,
+				p->ravg.prev_window);
+			walt_task_dump(p);
+			SCHED_BUG_ON(1);
+		}
 		*src_prev_runnable_sum -= p->ravg.prev_window;
+
 		if (new_task) {
+			if (*src_nt_curr_runnable_sum < p->ravg.curr_window) {
+				printk_deferred("WALT-BUG pid=%u CPU=%d event=%d src_nt_crs=%llu is lesser than task_contrib=%u",
+					p->pid, cpu, event,
+					*src_nt_curr_runnable_sum,
+					p->ravg.curr_window);
+				walt_task_dump(p);
+				SCHED_BUG_ON(1);
+			}
 			*src_nt_curr_runnable_sum -= p->ravg.curr_window;
+
+			if (*src_nt_prev_runnable_sum < p->ravg.prev_window) {
+				printk_deferred("WALT-BUG pid=%u CPU=%d event=%d src_nt_prs=%llu is lesser than task_contrib=%u",
+					p->pid, cpu, event,
+					*src_nt_prev_runnable_sum,
+					p->ravg.prev_window);
+				walt_task_dump(p);
+				SCHED_BUG_ON(1);
+			}
 			*src_nt_prev_runnable_sum -= p->ravg.prev_window;
 		}
 
@@ -3192,11 +3319,6 @@ static void transfer_busy_time(struct rq *rq, struct related_thread_group *grp,
 	p->ravg.prev_window_cpu[cpu] = p->ravg.prev_window;
 
 	trace_sched_migration_update_sum(p, migrate_type, rq);
-
-	BUG_ON((s64)*src_curr_runnable_sum < 0);
-	BUG_ON((s64)*src_prev_runnable_sum < 0);
-	BUG_ON((s64)*src_nt_curr_runnable_sum < 0);
-	BUG_ON((s64)*src_nt_prev_runnable_sum < 0);
 }
 
 bool is_rtgb_active(void)
@@ -3220,6 +3342,13 @@ u64 get_rtgb_active_time(void)
 	return 0;
 }
 
+static void walt_init_window_dep(void);
+static void walt_tunables_fixup(void)
+{
+	walt_update_group_thresholds();
+	walt_init_window_dep();
+}
+
 /*
  * Runs in hard-irq context. This should ideally run just after the latest
  * window roll-over.
@@ -3325,6 +3454,22 @@ void walt_irq_work(struct irq_work *irq_work)
 		}
 	}
 
+	/*
+	 * If a window size change request is pending, this is a good place to
+	 * change sched_ravg_window since all rq locks are acquired.
+	 */
+	if (!is_migration) {
+		if (sched_ravg_window != new_sched_ravg_window) {
+			sched_ravg_window_change_time = sched_ktime_clock();
+			printk_deferred("ALERT: changing window size from %u to %u at %llu\n",
+					sched_ravg_window,
+					new_sched_ravg_window,
+					sched_ravg_window_change_time);
+			sched_ravg_window = new_sched_ravg_window;
+			walt_tunables_fixup();
+		}
+	}
+
 	for_each_cpu(cpu, cpu_possible_mask)
 		raw_spin_unlock(&cpu_rq(cpu)->lock);
 
@@ -3437,12 +3582,8 @@ int walt_proc_group_thresholds_handler(struct ctl_table *table, int write,
 	return ret;
 }
 
-static void walt_init_once(void)
+static void walt_init_window_dep(void)
 {
-	init_irq_work(&walt_migration_irq_work, walt_irq_work);
-	init_irq_work(&walt_cpufreq_irq_work, walt_irq_work);
-	walt_rotate_work_init();
-
 	walt_cpu_util_freq_divisor =
 	    (sched_ravg_window >> SCHED_CAPACITY_SHIFT) * 100;
 	walt_scale_demand_divisor = sched_ravg_window >> SCHED_CAPACITY_SHIFT;
@@ -3454,6 +3595,14 @@ static void walt_init_once(void)
 		scale_demand(sched_init_task_load_windows);
 }
 
+static void walt_init_once(void)
+{
+	init_irq_work(&walt_migration_irq_work, walt_irq_work);
+	init_irq_work(&walt_cpufreq_irq_work, walt_irq_work);
+	walt_rotate_work_init();
+	walt_init_window_dep();
+}
+
 void walt_sched_init_rq(struct rq *rq)
 {
 	int j;
@@ -3464,6 +3613,7 @@ void walt_sched_init_rq(struct rq *rq)
 	cpumask_set_cpu(cpu_of(rq), &rq->freq_domain_cpumask);
 
 	rq->walt_stats.cumulative_runnable_avg_scaled = 0;
+	rq->prev_window_size = sched_ravg_window;
 	rq->window_start = 0;
 	rq->walt_stats.nr_big_tasks = 0;
 	rq->walt_flags = 0;
@@ -3527,3 +3677,154 @@ int walt_proc_user_hint_handler(struct ctl_table *table,
 	mutex_unlock(&mutex);
 	return ret;
 }
+
+static inline void sched_window_nr_ticks_change(int new_nr_ticks)
+{
+	new_sched_ravg_window = new_nr_ticks * (NSEC_PER_SEC / HZ);
+}
+
+int sched_ravg_window_handler(struct ctl_table *table,
+				int write, void __user *buffer, size_t *lenp,
+				loff_t *ppos)
+{
+	int ret;
+	static DEFINE_MUTEX(mutex);
+	unsigned int prev_value;
+
+	mutex_lock(&mutex);
+
+	prev_value = sysctl_sched_ravg_window_nr_ticks;
+	ret = proc_douintvec_ravg_window(table, write, buffer, lenp, ppos);
+	if (ret || !write ||
+			(prev_value == sysctl_sched_ravg_window_nr_ticks) ||
+			(sysctl_sched_ravg_window_nr_ticks == 0))
+		goto unlock;
+
+	sched_window_nr_ticks_change(sysctl_sched_ravg_window_nr_ticks);
+
+unlock:
+	mutex_unlock(&mutex);
+	return ret;
+}
+
+void sched_set_refresh_rate(enum fps fps)
+{
+	int new_nr_ticks;
+
+	if (HZ == 250) {
+		if (fps > FPS90)
+			new_nr_ticks = 2;
+		else if (fps == FPS90)
+			new_nr_ticks = 3;
+		else
+			new_nr_ticks = 5;
+		sched_window_nr_ticks_change(new_nr_ticks);
+	}
+}
+EXPORT_SYMBOL(sched_set_refresh_rate);
+
+/* Migration margins */
+unsigned int sysctl_sched_capacity_margin_up[MAX_MARGIN_LEVELS] = {
+			[0 ... MAX_MARGIN_LEVELS-1] = 1078}; /* ~5% margin */
+unsigned int sysctl_sched_capacity_margin_down[MAX_MARGIN_LEVELS] = {
+			[0 ... MAX_MARGIN_LEVELS-1] = 1205}; /* ~15% margin */
+
+#ifdef CONFIG_PROC_SYSCTL
+static void sched_update_updown_migrate_values(bool up)
+{
+	int i = 0, cpu;
+	struct sched_cluster *cluster;
+	int cap_margin_levels = num_sched_clusters - 1;
+
+	if (cap_margin_levels > 1) {
+		/*
+		 * No need to worry about CPUs in last cluster
+		 * if there are more than 2 clusters in the system
+		 */
+		for_each_sched_cluster(cluster) {
+			for_each_cpu(cpu, &cluster->cpus) {
+
+				if (up)
+					sched_capacity_margin_up[cpu] =
+					sysctl_sched_capacity_margin_up[i];
+				else
+					sched_capacity_margin_down[cpu] =
+					sysctl_sched_capacity_margin_down[i];
+			}
+
+			if (++i >= cap_margin_levels)
+				break;
+		}
+	} else {
+		for_each_possible_cpu(cpu) {
+			if (up)
+				sched_capacity_margin_up[cpu] =
+					sysctl_sched_capacity_margin_up[0];
+			else
+				sched_capacity_margin_down[cpu] =
+					sysctl_sched_capacity_margin_down[0];
+		}
+	}
+}
+
+int sched_updown_migrate_handler(struct ctl_table *table, int write,
+				void __user *buffer, size_t *lenp,
+				loff_t *ppos)
+{
+	int ret, i;
+	unsigned int *data = (unsigned int *)table->data;
+	unsigned int *old_val;
+	static DEFINE_MUTEX(mutex);
+	int cap_margin_levels = num_sched_clusters ? num_sched_clusters - 1 : 0;
+
+	if (cap_margin_levels <= 0)
+		return -EINVAL;
+
+	mutex_lock(&mutex);
+
+	if (table->maxlen != (sizeof(unsigned int) * cap_margin_levels))
+		table->maxlen = sizeof(unsigned int) * cap_margin_levels;
+
+	if (!write) {
+		ret = proc_douintvec_capacity(table, write, buffer, lenp, ppos);
+		goto unlock_mutex;
+	}
+
+	/*
+	 * Cache the old values so that they can be restored
+	 * if either the write fails (for example out of range values)
+	 * or the downmigrate and upmigrate are not in sync.
+	 */
+	old_val = kmemdup(data, table->maxlen, GFP_KERNEL);
+	if (!old_val) {
+		ret = -ENOMEM;
+		goto unlock_mutex;
+	}
+
+	ret = proc_douintvec_capacity(table, write, buffer, lenp, ppos);
+
+	if (ret) {
+		memcpy(data, old_val, table->maxlen);
+		goto free_old_val;
+	}
+
+	for (i = 0; i < cap_margin_levels; i++) {
+		if (sysctl_sched_capacity_margin_up[i] >
+				sysctl_sched_capacity_margin_down[i]) {
+			memcpy(data, old_val, table->maxlen);
+			ret = -EINVAL;
+			goto free_old_val;
+		}
+	}
+
+	sched_update_updown_migrate_values(data ==
+					&sysctl_sched_capacity_margin_up[0]);
+
+free_old_val:
+	kfree(old_val);
+unlock_mutex:
+	mutex_unlock(&mutex);
+
+	return ret;
+}
+#endif
diff --git a/kernel/sched/walt.h b/kernel/sched/walt.h
index 7fe6726..5a150f5a 100644
--- a/kernel/sched/walt.h
+++ b/kernel/sched/walt.h
@@ -28,13 +28,15 @@
 #define for_each_related_thread_group(grp) \
 	list_for_each_entry(grp, &active_related_thread_groups, list)
 
-#define SCHED_NEW_TASK_WINDOWS 5
+#define NEW_TASK_ACTIVE_TIME 100000000
 
 extern unsigned int sched_ravg_window;
+extern unsigned int new_sched_ravg_window;
 extern unsigned int max_possible_efficiency;
 extern unsigned int min_possible_efficiency;
 extern unsigned int max_possible_freq;
 extern unsigned int __read_mostly sched_load_granule;
+extern u64 sched_ravg_window_change_time;
 
 extern struct mutex cluster_lock;
 extern rwlock_t related_thread_group_lock;
@@ -43,8 +45,6 @@ extern __read_mostly unsigned int sched_freq_aggregate;
 extern __read_mostly unsigned int sched_group_upmigrate;
 extern __read_mostly unsigned int sched_group_downmigrate;
 
-extern struct sched_cluster init_cluster;
-
 extern void update_task_ravg(struct task_struct *p, struct rq *rq, int event,
 						u64 wallclock, u64 irqtime);
 
@@ -194,7 +194,7 @@ scale_load_to_freq(u64 load, unsigned int src_freq, unsigned int dst_freq)
 
 static inline bool is_new_task(struct task_struct *p)
 {
-	return p->ravg.active_windows < SCHED_NEW_TASK_WINDOWS;
+	return p->ravg.active_time <= NEW_TASK_ACTIVE_TIME;
 }
 
 static inline void clear_top_tasks_table(u8 *table)
@@ -324,6 +324,102 @@ static inline bool walt_should_kick_upmigrate(struct task_struct *p, int cpu)
 
 extern bool is_rtgb_active(void);
 extern u64 get_rtgb_active_time(void);
+#define SCHED_PRINT(arg)        printk_deferred("%s=%llu", #arg, arg)
+#define STRG(arg)               #arg
+
+static inline void walt_task_dump(struct task_struct *p)
+{
+	char buff[NR_CPUS * 16];
+	int i, j = 0;
+	int buffsz = NR_CPUS * 16;
+
+	SCHED_PRINT(p->pid);
+	SCHED_PRINT(p->ravg.mark_start);
+	SCHED_PRINT(p->ravg.demand);
+	SCHED_PRINT(p->ravg.coloc_demand);
+	SCHED_PRINT(sched_ravg_window);
+	SCHED_PRINT(new_sched_ravg_window);
+
+	for (i = 0 ; i < nr_cpu_ids; i++)
+		j += scnprintf(buff + j, buffsz - j, "%u ",
+				p->ravg.curr_window_cpu[i]);
+	printk_deferred("%s=%d (%s)\n", STRG(p->ravg.curr_window),
+			p->ravg.curr_window, buff);
+
+	for (i = 0, j = 0 ; i < nr_cpu_ids; i++)
+		j += scnprintf(buff + j, buffsz - j, "%u ",
+				p->ravg.prev_window_cpu[i]);
+	printk_deferred("%s=%d (%s)\n", STRG(p->ravg.prev_window),
+			p->ravg.prev_window, buff);
+
+	SCHED_PRINT(p->last_wake_ts);
+	SCHED_PRINT(p->last_enqueued_ts);
+	SCHED_PRINT(p->misfit);
+	SCHED_PRINT(p->unfilter);
+}
+
+static inline void walt_rq_dump(int cpu)
+{
+	struct rq *rq = cpu_rq(cpu);
+	struct task_struct *tsk = cpu_curr(cpu);
+	int i;
+
+	printk_deferred("CPU:%d nr_running:%u current: %d (%s)\n",
+			cpu, rq->nr_running, tsk->pid, tsk->comm);
+
+	printk_deferred("==========================================");
+	SCHED_PRINT(rq->window_start);
+	SCHED_PRINT(rq->prev_window_size);
+	SCHED_PRINT(rq->curr_runnable_sum);
+	SCHED_PRINT(rq->prev_runnable_sum);
+	SCHED_PRINT(rq->nt_curr_runnable_sum);
+	SCHED_PRINT(rq->nt_prev_runnable_sum);
+	SCHED_PRINT(rq->cum_window_demand_scaled);
+	SCHED_PRINT(rq->cc.time);
+	SCHED_PRINT(rq->cc.cycles);
+	SCHED_PRINT(rq->grp_time.curr_runnable_sum);
+	SCHED_PRINT(rq->grp_time.prev_runnable_sum);
+	SCHED_PRINT(rq->grp_time.nt_curr_runnable_sum);
+	SCHED_PRINT(rq->grp_time.nt_prev_runnable_sum);
+	for (i = 0 ; i < NUM_TRACKED_WINDOWS; i++) {
+		printk_deferred("rq->load_subs[%d].window_start=%llu\n", i,
+				rq->load_subs[i].window_start);
+		printk_deferred("rq->load_subs[%d].subs=%llu\n", i,
+				rq->load_subs[i].subs);
+		printk_deferred("rq->load_subs[%d].new_subs=%llu\n", i,
+				rq->load_subs[i].new_subs);
+	}
+	walt_task_dump(tsk);
+	SCHED_PRINT(sched_capacity_margin_up[cpu]);
+	SCHED_PRINT(sched_capacity_margin_down[cpu]);
+}
+
+static inline void walt_dump(void)
+{
+	int cpu;
+
+	printk_deferred("============ WALT RQ DUMP START ==============\n");
+	printk_deferred("Sched ktime_get: %llu\n", sched_ktime_clock());
+	printk_deferred("Time last window changed=%llu\n",
+			sched_ravg_window_change_time);
+	for_each_online_cpu(cpu) {
+		walt_rq_dump(cpu);
+	}
+	SCHED_PRINT(max_possible_capacity);
+	SCHED_PRINT(min_max_possible_capacity);
+
+	printk_deferred("============ WALT RQ DUMP END ==============\n");
+}
+
+static int in_sched_bug;
+#define SCHED_BUG_ON(condition)				\
+({							\
+	if (unlikely(!!(condition)) && !in_sched_bug) {	\
+		in_sched_bug = 1;			\
+		walt_dump();				\
+		BUG_ON(condition);			\
+	}						\
+})
 
 #else /* CONFIG_SCHED_WALT */
 
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index acdccea..e4d61ff 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -520,8 +520,13 @@ static struct ctl_table kern_table[] = {
 		.extra1		= &zero,
 		.extra2		= &one_hundred_thousand,
 	},
-#endif
-#ifdef CONFIG_SMP
+	{
+		.procname	= "sched_ravg_window_nr_ticks",
+		.data		= &sysctl_sched_ravg_window_nr_ticks,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= sched_ravg_window_handler,
+	},
 	{
 		.procname	= "sched_upmigrate",
 		.data		= &sysctl_sched_capacity_margin_up,
@@ -3553,6 +3558,29 @@ int proc_douintvec_capacity(struct ctl_table *table, int write,
 				do_proc_douintvec_capacity_conv, NULL);
 }
 
+static int do_proc_douintvec_rwin(bool *negp, unsigned long *lvalp,
+				  int *valp, int write, void *data)
+{
+	if (write) {
+		if (*lvalp == 0 || *lvalp == 2 || *lvalp == 5)
+			*valp = *lvalp;
+		else
+			return -EINVAL;
+	} else {
+		*negp = false;
+		*lvalp = *valp;
+	}
+
+	return 0;
+}
+
+int proc_douintvec_ravg_window(struct ctl_table *table, int write,
+			       void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+	return do_proc_dointvec(table, write, buffer, lenp, ppos,
+				do_proc_douintvec_rwin, NULL);
+}
+
 #else /* CONFIG_PROC_SYSCTL */
 
 int proc_dostring(struct ctl_table *table, int write,
@@ -3622,6 +3650,12 @@ int proc_douintvec_capacity(struct ctl_table *table, int write,
 	return -ENOSYS;
 }
 
+int proc_douintvec_ravg_window(struct ctl_table *table, int write,
+			       void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+	return -ENOSYS;
+}
+
 #endif /* CONFIG_PROC_SYSCTL */
 
 /*
diff --git a/mm/cma.c b/mm/cma.c
index d3973af..445902a 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -456,7 +456,7 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
 	struct page *page = NULL;
 	int ret = -ENOMEM;
 	int retry_after_sleep = 0;
-	int max_retries = 2;
+	int max_retries = 20;
 	int available_regions = 0;
 
 	if (!cma || !cma->count)
@@ -492,7 +492,7 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
 				 * are less.
 				 */
 				if (available_regions < 3)
-					max_retries = 5;
+					max_retries = 25;
 				available_regions = 0;
 				/*
 				 * Page may be momentarily pinned by some other
diff --git a/mm/memory.c b/mm/memory.c
index b619cc2..4e60e15 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3524,7 +3524,7 @@ static vm_fault_t pte_alloc_one_map(struct vm_fault *vmf)
 {
 	struct vm_area_struct *vma = vmf->vma;
 
-	if (!pmd_none(*vmf->pmd))
+	if (!pmd_none(*vmf->pmd) || (vmf->flags & FAULT_FLAG_SPECULATIVE))
 		goto map_pte;
 	if (vmf->prealloc_pte) {
 		vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index fe70c43..f52fb42 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2224,11 +2224,38 @@ static bool can_steal_fallback(unsigned int order, int start_mt)
 	return false;
 }
 
+static bool boost_eligible(struct zone *z)
+{
+	unsigned long high_wmark, threshold;
+	unsigned long reclaim_eligible, free_pages;
+
+	high_wmark = z->_watermark[WMARK_HIGH];
+	reclaim_eligible = zone_page_state_snapshot(z, NR_ZONE_INACTIVE_FILE) +
+			zone_page_state_snapshot(z, NR_ZONE_ACTIVE_FILE);
+	free_pages = zone_page_state(z, NR_FREE_PAGES) -
+			zone_page_state(z, NR_FREE_CMA_PAGES);
+	threshold = high_wmark + (2 * mult_frac(high_wmark,
+					watermark_boost_factor, 10000));
+
+	/*
+	 * Don't boost the watermark if we are already low on memory, where
+	 * boosting can simply keep the watermarks at higher levels for a
+	 * longer duration of time, forcing the other users that rely on the
+	 * watermarks into unintended decisions. If memory is that low,
+	 * kswapd in normal mode should help.
+	 */
+
+	if (reclaim_eligible < threshold && free_pages < threshold)
+		return false;
+
+	return true;
+}
+
 static inline void boost_watermark(struct zone *zone)
 {
 	unsigned long max_boost;
 
-	if (!watermark_boost_factor)
+	if (!watermark_boost_factor || !boost_eligible(zone))
 		return;
 
 	max_boost = mult_frac(zone->_watermark[WMARK_HIGH],
@@ -3591,6 +3618,20 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
 		}
 
 		mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
+		/*
+		 * Allow high, atomic, harder order-0 allocation requests
+		 * to skip the ->watermark_boost for min watermark check.
+		 * In doing so, check for:
+		 *  1) ALLOC_WMARK_MIN - Allow to wake up kswapd in the
+		 *			 slow path.
+		 *  2) ALLOC_HIGH - Allow high priority requests.
+		 *  3) ALLOC_HARDER - Allow (__GFP_ATOMIC && !__GFP_NOMEMALLOC),
+		 *			of the others.
+		 */
+		if (unlikely(!order && (alloc_flags & ALLOC_WMARK_MIN) &&
+		     (alloc_flags & (ALLOC_HARDER | ALLOC_HIGH)))) {
+			mark = zone->_watermark[WMARK_MIN];
+		}
 		if (!zone_watermark_fast(zone, order, mark,
 				       ac_classzone_idx(ac), alloc_flags)) {
 			int ret;
diff --git a/mm/vmstat.c b/mm/vmstat.c
index fd1d172..dc2287c 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1568,9 +1568,9 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
 		   "\n        present  %lu"
 		   "\n        managed  %lu",
 		   zone_page_state(zone, NR_FREE_PAGES),
-		   min_wmark_pages(zone) - zone->watermark_boost,
-		   low_wmark_pages(zone) - zone->watermark_boost,
-		   high_wmark_pages(zone) - zone->watermark_boost,
+		   min_wmark_pages(zone),
+		   low_wmark_pages(zone),
+		   high_wmark_pages(zone),
 		   zone->spanned_pages,
 		   zone->present_pages,
 		   zone->managed_pages);
diff --git a/techpack/Kbuild b/techpack/Kbuild
index 1d4274d..d5f247d 100644
--- a/techpack/Kbuild
+++ b/techpack/Kbuild
@@ -1,8 +1,8 @@
 # SPDX-License-Identifier: GPL-2.0-only
 TECHPACK?=y
 
-techpack-dirs := $(shell find $(srctree)/$(src) -maxdepth 1 -mindepth 1 -xtype d -not -name ".*")
-obj-${TECHPACK} += stub/ $(addsuffix /,$(subst $(srctree)/$(src)/,,$(techpack-dirs)))
+techpack-dirs := $(shell find $(srctree)/techpack -maxdepth 1 -mindepth 1 -type d -not -name ".*")
+obj-${TECHPACK} += stub/ $(addsuffix /,$(subst $(srctree)/techpack/,,$(techpack-dirs)))
 
 techpack-header-dirs := $(shell find $(srctree)/techpack -maxdepth 1 -mindepth 1 -type d -not -name ".*")
 header-${TECHPACK} += $(addsuffix /include/uapi/,$(subst $(srctree)/techpack/,,$(techpack-header-dirs)))