Merge LA.UM.9.12.R2.10.00.00.685.039 via branch 'qcom-msm-4.19-7250' into android-msm-pixel-4.19

 Conflicts:
	msm/dsi/dsi_display.c
	msm/dsi/dsi_panel.c
	msm/msm_atomic.c

Bug: 172988823
Signed-off-by: lucaswei <lucaswei@google.com>
Change-Id: I1312736da70634a1fbb66efa29a07acc89b32f21
diff --git a/msm/dp/dp_ctrl.c b/msm/dp/dp_ctrl.c
index 2870cda..88c4192 100644
--- a/msm/dp/dp_ctrl.c
+++ b/msm/dp/dp_ctrl.c
@@ -996,14 +996,15 @@
 	u64 raw_target_sc, target_sc_fixp;
 	u64 ts_denom, ts_enum, ts_int;
 	u64 pclk = panel->pinfo.pixel_clk_khz;
-	u64 lclk = panel->link_info.rate;
-	u64 lanes = panel->link_info.num_lanes;
+	u64 lclk = 0;
+	u64 lanes = ctrl->link->link_params.lane_count;
 	u64 bpp = panel->pinfo.bpp;
 	u64 pbn = panel->pbn;
 	u64 numerator, denominator, temp, temp1, temp2;
 	u32 x_int = 0, y_frac_enum = 0;
 	u64 target_strm_sym, ts_int_fixp, ts_frac_fixp, y_frac_enum_fixp;
 
+	lclk = drm_dp_bw_code_to_link_rate(ctrl->link->link_params.bw_code);
 	if (panel->pinfo.comp_info.comp_ratio)
 		bpp = panel->pinfo.comp_info.dsc_info.bpp;
 
@@ -1170,6 +1171,11 @@
 
 	ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
 
+	if (!ctrl->power_on) {
+		DP_ERR("ctrl off\n");
+		return -EINVAL;
+	}
+
 	rc = dp_ctrl_enable_stream_clocks(ctrl, panel);
 	if (rc) {
 		DP_ERR("failure on stream clock enable\n");
diff --git a/msm/dp/dp_display.c b/msm/dp/dp_display.c
index c58c7f7..cfae5da 100644
--- a/msm/dp/dp_display.c
+++ b/msm/dp/dp_display.c
@@ -712,7 +712,7 @@
 	snprintf(pattern, HPD_STRING_SIZE, "pattern=%d",
 		dp->link->test_video.test_video_pattern);
 
-	DP_DEBUG("[%s]:[%s] [%s] [%s]\n", name, status, bpp, pattern);
+	DP_INFO("[%s]:[%s] [%s] [%s]\n", name, status, bpp, pattern);
 	envp[0] = name;
 	envp[1] = status;
 	envp[2] = bpp;
@@ -1167,6 +1167,12 @@
 		return;
 	}
 
+	if (dp_panel->stream_id == DP_STREAM_MAX ||
+			!dp->active_panels[dp_panel->stream_id]) {
+		DP_ERR("panel is already disabled\n");
+		return;
+	}
+
 	DP_DEBUG("stream_id=%d, active_stream_cnt=%d\n",
 			dp_panel->stream_id, dp->active_stream_cnt);
 
@@ -1893,7 +1899,7 @@
 	mutex_unlock(&dp->session_lock);
 
 	SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT, dp->state);
-	return 0;
+	return rc;
 }
 
 static int dp_display_set_stream_info(struct dp_display *dp_display,
@@ -2309,7 +2315,6 @@
 		const struct msm_resource_caps_info *avail_res)
 {
 	struct dp_display_private *dp;
-	struct drm_dp_link *link_info;
 	u32 mode_rate_khz = 0, supported_rate_khz = 0, mode_bpp = 0;
 	struct dp_panel *dp_panel;
 	struct dp_debug *debug;
@@ -2338,8 +2343,6 @@
 		goto end;
 	}
 
-	link_info = &dp->panel->link_info;
-
 	debug = dp->debug;
 	if (!debug)
 		goto end;
@@ -2352,7 +2355,7 @@
 
 	mode_rate_khz = mode->clock * mode_bpp;
 	rate = drm_dp_bw_code_to_link_rate(dp->link->link_params.bw_code);
-	supported_rate_khz = link_info->num_lanes * rate * 8;
+	supported_rate_khz = dp->link->link_params.lane_count * rate * 8;
 	tmds_max_clock = dp_panel->connector->display_info.max_tmds_clock;
 
 	if (mode_rate_khz > supported_rate_khz) {
@@ -2539,6 +2542,11 @@
 		return -EINVAL;
 	}
 
+	if (!dp_display_state_is(DP_STATE_ENABLED)) {
+		dp_display_state_show("[not enabled]");
+		return 0;
+	}
+
 	/*
 	 * In rare cases where HDR metadata is updated independently
 	 * flush the HDR metadata immediately instead of relying on
@@ -2560,12 +2568,20 @@
 		u32 colorspace)
 {
 	struct dp_panel *dp_panel;
+	struct dp_display_private *dp;
 
 	if (!dp_display || !panel) {
 		pr_err("invalid input\n");
 		return -EINVAL;
 	}
 
+	dp = container_of(dp_display, struct dp_display_private, dp_display);
+
+	if (!dp_display_state_is(DP_STATE_ENABLED)) {
+		dp_display_state_show("[not enabled]");
+		return 0;
+	}
+
 	dp_panel = panel;
 
 	return dp_panel->set_colorspace(dp_panel, colorspace);
@@ -2898,6 +2914,11 @@
 		return -EINVAL;
 	}
 
+	if (!dp_display_state_is(DP_STATE_ENABLED)) {
+		dp_display_state_show("[not enabled]");
+		return 0;
+	}
+
 	dp_panel = sde_conn->drv_panel;
 	dp_panel->update_pps(dp_panel, pps_cmd);
 	return 0;
diff --git a/msm/dp/dp_panel.c b/msm/dp/dp_panel.c
index b31103a..74f915b 100644
--- a/msm/dp/dp_panel.c
+++ b/msm/dp/dp_panel.c
@@ -1355,8 +1355,11 @@
 	int tot_num_hor_bytes, tot_num_dummy_bytes;
 	int dwidth_dsc_bytes, eoc_bytes;
 	u32 num_lanes;
+	struct dp_panel_private *panel;
 
-	num_lanes = dp_panel->link_info.num_lanes;
+	panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+
+	num_lanes = panel->link->link_params.lane_count;
 	num_slices = dsc->slice_per_pkt;
 
 	eoc_bytes = dsc_byte_cnt % num_lanes;
@@ -2171,18 +2174,23 @@
 static u32 dp_panel_get_supported_bpp(struct dp_panel *dp_panel,
 		u32 mode_edid_bpp, u32 mode_pclk_khz)
 {
-	struct drm_dp_link *link_info;
+	struct dp_link_params *link_params;
+	struct dp_panel_private *panel;
 	const u32 max_supported_bpp = 30;
 	u32 min_supported_bpp = 18;
 	u32 bpp = 0, data_rate_khz = 0, tmds_max_clock = 0;
 
+	panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+
 	if (dp_panel->dsc_en)
 		min_supported_bpp = 24;
 
 	bpp = min_t(u32, mode_edid_bpp, max_supported_bpp);
 
-	link_info = &dp_panel->link_info;
-	data_rate_khz = link_info->num_lanes * link_info->rate * 8;
+	link_params = &panel->link->link_params;
+
+	data_rate_khz = link_params->lane_count *
+		drm_dp_bw_code_to_link_rate(link_params->bw_code) * 8;
 	tmds_max_clock = dp_panel->connector->display_info.max_tmds_clock;
 
 	for (; bpp > min_supported_bpp; bpp -= 6) {
@@ -2680,32 +2688,6 @@
 	return rc;
 }
 
-static u32 dp_panel_get_min_req_link_rate(struct dp_panel *dp_panel)
-{
-	const u32 encoding_factx10 = 8;
-	u32 min_link_rate_khz = 0, lane_cnt;
-	struct dp_panel_info *pinfo;
-
-	if (!dp_panel) {
-		DP_ERR("invalid input\n");
-		goto end;
-	}
-
-	lane_cnt = dp_panel->link_info.num_lanes;
-	pinfo = &dp_panel->pinfo;
-
-	/* num_lanes * lane_count * 8 >= pclk * bpp * 10 */
-	min_link_rate_khz = pinfo->pixel_clk_khz /
-				(lane_cnt * encoding_factx10);
-	min_link_rate_khz *= pinfo->bpp;
-
-	DP_DEBUG("min lclk req=%d khz for pclk=%d khz, lanes=%d, bpp=%d\n",
-		min_link_rate_khz, pinfo->pixel_clk_khz, lane_cnt,
-		pinfo->bpp);
-end:
-	return min_link_rate_khz;
-}
-
 static bool dp_panel_hdr_supported(struct dp_panel *dp_panel)
 {
 	struct dp_panel_private *panel;
@@ -2966,8 +2948,9 @@
 		dp_panel_setup_dhdr_vsif(panel);
 
 		input.mdp_clk = core_clk_rate;
-		input.lclk = dp_panel->link_info.rate;
-		input.nlanes = dp_panel->link_info.num_lanes;
+		input.lclk = drm_dp_bw_code_to_link_rate(
+				panel->link->link_params.bw_code);
+		input.nlanes = panel->link->link_params.lane_count;
 		input.pclk = dp_panel->pinfo.pixel_clk_khz;
 		input.h_active = dp_panel->pinfo.h_active;
 		input.mst_target_sc = dp_panel->mst_target_sc;
@@ -3371,7 +3354,6 @@
 	dp_panel->deinit = dp_panel_deinit_panel_info;
 	dp_panel->hw_cfg = dp_panel_hw_cfg;
 	dp_panel->read_sink_caps = dp_panel_read_sink_caps;
-	dp_panel->get_min_req_link_rate = dp_panel_get_min_req_link_rate;
 	dp_panel->get_mode_bpp = dp_panel_get_mode_bpp;
 	dp_panel->get_modes = dp_panel_get_modes;
 	dp_panel->handle_sink_request = dp_panel_handle_sink_request;
diff --git a/msm/dp/dp_panel.h b/msm/dp/dp_panel.h
index 212b615..dbc5ba9 100644
--- a/msm/dp/dp_panel.h
+++ b/msm/dp/dp_panel.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
  */
 
 #ifndef _DP_PANEL_H_
@@ -141,7 +141,6 @@
 	int (*hw_cfg)(struct dp_panel *dp_panel, bool enable);
 	int (*read_sink_caps)(struct dp_panel *dp_panel,
 		struct drm_connector *connector, bool multi_func);
-	u32 (*get_min_req_link_rate)(struct dp_panel *dp_panel);
 	u32 (*get_mode_bpp)(struct dp_panel *dp_panel, u32 mode_max_bpp,
 			u32 mode_pclk_khz);
 	int (*get_modes)(struct dp_panel *dp_panel,
diff --git a/msm/dp/dp_usbpd.c b/msm/dp/dp_usbpd.c
index 030fe61..f49df59 100644
--- a/msm/dp/dp_usbpd.c
+++ b/msm/dp/dp_usbpd.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/usb/usbpd.h>
@@ -246,7 +246,7 @@
 		return;
 	}
 
-	DP_DEBUG("peer_usb_comm: %d\n");
+	DP_DEBUG("peer_usb_comm: %d\n", peer_usb_comm);
 	pd->dp_usbpd.base.peer_usb_comm = peer_usb_comm;
 	dp_usbpd_send_event(pd, DP_USBPD_EVT_DISCOVER);
 }
@@ -403,6 +403,7 @@
 	case DP_USBPD_VDM_CONFIGURE:
 		pd->alt_mode |= DP_USBPD_ALT_MODE_CONFIGURE;
 		pd->dp_usbpd.base.alt_mode_cfg_done = true;
+		pd->forced_disconnect = false;
 		dp_usbpd_get_status(pd);
 
 		pd->dp_usbpd.base.orientation =
diff --git a/msm/dsi/dsi_catalog.c b/msm/dsi/dsi_catalog.c
index f26c729..19fb900 100644
--- a/msm/dsi/dsi_catalog.c
+++ b/msm/dsi/dsi_catalog.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2020, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/errno.h>
@@ -81,6 +81,8 @@
 		ctrl->ops.schedule_dma_cmd = NULL;
 		ctrl->ops.kickoff_command_non_embedded_mode = NULL;
 		ctrl->ops.config_clk_gating = NULL;
+		ctrl->ops.map_mdp_regs = NULL;
+		ctrl->ops.log_line_count = NULL;
 		break;
 	case DSI_CTRL_VERSION_2_0:
 		ctrl->ops.setup_lane_map = dsi_ctrl_hw_20_setup_lane_map;
@@ -96,6 +98,8 @@
 		ctrl->ops.schedule_dma_cmd = NULL;
 		ctrl->ops.kickoff_command_non_embedded_mode = NULL;
 		ctrl->ops.config_clk_gating = NULL;
+		ctrl->ops.map_mdp_regs = NULL;
+		ctrl->ops.log_line_count = NULL;
 		break;
 	case DSI_CTRL_VERSION_2_2:
 	case DSI_CTRL_VERSION_2_3:
@@ -116,6 +120,8 @@
 		ctrl->ops.schedule_dma_cmd = dsi_ctrl_hw_22_schedule_dma_cmd;
 		ctrl->ops.kickoff_command_non_embedded_mode =
 			dsi_ctrl_hw_kickoff_non_embedded_mode;
+		ctrl->ops.map_mdp_regs = dsi_ctrl_hw_22_map_mdp_regs;
+		ctrl->ops.log_line_count = dsi_ctrl_hw_22_log_line_count;
 		break;
 	default:
 		break;
diff --git a/msm/dsi/dsi_catalog.h b/msm/dsi/dsi_catalog.h
index ed047e9..3118cb5 100644
--- a/msm/dsi/dsi_catalog.h
+++ b/msm/dsi/dsi_catalog.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2020, The Linux Foundation. All rights reserved.
  */
 
 #ifndef _DSI_CATALOG_H_
@@ -270,4 +270,9 @@
 
 int dsi_phy_hw_v4_0_cache_phy_timings(struct dsi_phy_per_lane_cfgs *timings,
 				      u32 *dst, u32 size);
+
+int dsi_ctrl_hw_22_map_mdp_regs(struct platform_device *pdev,
+		struct dsi_ctrl_hw *ctrl);
+
+u32 dsi_ctrl_hw_22_log_line_count(struct dsi_ctrl_hw *ctrl, bool cmd_mode);
 #endif /* _DSI_CATALOG_H_ */
diff --git a/msm/dsi/dsi_clk.h b/msm/dsi/dsi_clk.h
index 1a3928a..ccc1ba7 100644
--- a/msm/dsi/dsi_clk.h
+++ b/msm/dsi/dsi_clk.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2020, The Linux Foundation. All rights reserved.
  */
 
 #ifndef _DSI_CLK_H_
@@ -105,10 +105,10 @@
 
 /**
  * struct link_clk_freq - Clock frequency information for Link clocks
- * @byte_clk_rate:   Frequency of DSI byte_clk in KHz.
- * @byte_intf_clk_rate:   Frequency of DSI byte_intf_clk in KHz.
- * @pixel_clk_rate:  Frequency of DSI pixel_clk in KHz.
- * @esc_clk_rate:    Frequency of DSI escape clock in KHz.
+ * @byte_clk_rate:   Frequency of DSI byte_clk in Hz.
+ * @byte_intf_clk_rate:   Frequency of DSI byte_intf_clk in Hz.
+ * @pixel_clk_rate:  Frequency of DSI pixel_clk in Hz.
+ * @esc_clk_rate:    Frequency of DSI escape clock in Hz.
  */
 struct link_clk_freq {
 	u32 byte_clk_rate;
diff --git a/msm/dsi/dsi_ctrl.c b/msm/dsi/dsi_ctrl.c
index 8493fdf..6848962 100644
--- a/msm/dsi/dsi_ctrl.c
+++ b/msm/dsi/dsi_ctrl.c
@@ -295,6 +295,7 @@
 		cancel_work_sync(&dsi_ctrl->dma_cmd_wait);
 	} else {
 		flush_workqueue(dsi_ctrl->dma_cmd_workq);
+		SDE_EVT32(SDE_EVTLOG_FUNC_CASE2);
 	}
 }
 
@@ -347,7 +348,7 @@
 	int rc = 0;
 	struct dsi_ctrl_state_info *state = &dsi_ctrl->current_state;
 
-	SDE_EVT32(dsi_ctrl->cell_index, op);
+	SDE_EVT32(dsi_ctrl->cell_index, op, op_state);
 
 	switch (op) {
 	case DSI_CTRL_OP_POWER_STATE_CHANGE:
@@ -971,6 +972,7 @@
 	DSI_CTRL_DEBUG(dsi_ctrl, "byte_clk_rate = %llu, byte_intf_clk = %llu\n",
 		  byte_clk_rate, byte_intf_clk_rate);
 	DSI_CTRL_DEBUG(dsi_ctrl, "pclk_rate = %llu\n", pclk_rate);
+	SDE_EVT32(dsi_ctrl->cell_index, bit_rate, byte_clk_rate, pclk_rate);
 
 	dsi_ctrl->clk_freq.byte_clk_rate = byte_clk_rate;
 	dsi_ctrl->clk_freq.byte_intf_clk_rate = byte_intf_clk_rate;
@@ -1144,6 +1146,7 @@
 	 * override cmd fetch mode during secure session
 	 */
 	if (dsi_ctrl->secure_mode) {
+		SDE_EVT32(dsi_ctrl->cell_index, SDE_EVTLOG_FUNC_CASE1);
 		*flags &= ~DSI_CTRL_CMD_FETCH_MEMORY;
 		*flags |= DSI_CTRL_CMD_FIFO_STORE;
 		DSI_CTRL_DEBUG(dsi_ctrl,
@@ -1204,6 +1207,24 @@
 
 	return rc;
 }
+static u32 calculate_schedule_line(struct dsi_ctrl *dsi_ctrl, u32 flags)
+{
+	u32 line_no = 0x1;
+	struct dsi_mode_info *timing;
+
+	/* check if custom dma scheduling line needed */
+	if ((dsi_ctrl->host_config.panel_mode == DSI_OP_VIDEO_MODE) &&
+		(flags & DSI_CTRL_CMD_CUSTOM_DMA_SCHED))
+		line_no = dsi_ctrl->host_config.u.video_engine.dma_sched_line;
+
+	timing = &(dsi_ctrl->host_config.video_timing);
+
+	if (timing)
+		line_no += timing->v_back_porch + timing->v_sync_width +
+				timing->v_active;
+
+	return line_no;
+}
 
 static void dsi_kickoff_msg_tx(struct dsi_ctrl *dsi_ctrl,
 				const struct mipi_dsi_msg *msg,
@@ -1213,19 +1234,13 @@
 {
 	u32 hw_flags = 0;
 	u32 line_no = 0x1;
-	struct dsi_mode_info *timing;
 	struct dsi_ctrl_hw_ops dsi_hw_ops = dsi_ctrl->hw.ops;
 
-	SDE_EVT32(dsi_ctrl->cell_index, SDE_EVTLOG_FUNC_ENTRY, flags);
-	/* check if custom dma scheduling line needed */
-	if ((dsi_ctrl->host_config.panel_mode == DSI_OP_VIDEO_MODE) &&
-		(flags & DSI_CTRL_CMD_CUSTOM_DMA_SCHED))
-		line_no = dsi_ctrl->host_config.u.video_engine.dma_sched_line;
+	SDE_EVT32(dsi_ctrl->cell_index, SDE_EVTLOG_FUNC_ENTRY, flags,
+		msg->flags);
 
-	timing = &(dsi_ctrl->host_config.video_timing);
-	if (timing)
-		line_no += timing->v_back_porch + timing->v_sync_width +
-				timing->v_active;
+	line_no = calculate_schedule_line(dsi_ctrl, flags);
+
 	if ((dsi_ctrl->host_config.panel_mode == DSI_OP_VIDEO_MODE) &&
 		dsi_hw_ops.schedule_dma_cmd &&
 		(dsi_ctrl->current_state.vid_engine_state ==
@@ -1233,6 +1248,8 @@
 		dsi_hw_ops.schedule_dma_cmd(&dsi_ctrl->hw,
 				line_no);
 
+	dsi_ctrl->cmd_mode = (dsi_ctrl->host_config.panel_mode ==
+				DSI_OP_CMD_MODE);
 	hw_flags |= (flags & DSI_CTRL_CMD_DEFER_TRIGGER) ?
 			DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER : 0;
 
@@ -1261,9 +1278,7 @@
 
 	if (!(flags & DSI_CTRL_CMD_DEFER_TRIGGER)) {
 		dsi_ctrl_wait_for_video_done(dsi_ctrl);
-		if (dsi_hw_ops.mask_error_intr)
-			dsi_hw_ops.mask_error_intr(&dsi_ctrl->hw,
-					BIT(DSI_FIFO_OVERFLOW), true);
+		dsi_ctrl_mask_overflow(dsi_ctrl, true);
 
 		atomic_set(&dsi_ctrl->dma_irq_trig, 0);
 		dsi_ctrl_enable_status_interrupt(dsi_ctrl,
@@ -1296,9 +1311,8 @@
 			dsi_ctrl_dma_cmd_wait_for_done(&dsi_ctrl->dma_cmd_wait);
 		}
 
-		if (dsi_hw_ops.mask_error_intr && !dsi_ctrl->esd_check_underway)
-			dsi_hw_ops.mask_error_intr(&dsi_ctrl->hw,
-					BIT(DSI_FIFO_OVERFLOW), false);
+		dsi_ctrl_mask_overflow(dsi_ctrl, false);
+
 		dsi_hw_ops.reset_cmd_fifo(&dsi_ctrl->hw);
 
 		/*
@@ -2031,6 +2045,9 @@
 		DSI_CTRL_DEBUG(dsi_ctrl, "failed to init axi bus client, rc = %d\n",
 				rc);
 
+	if (dsi_ctrl->hw.ops.map_mdp_regs)
+		dsi_ctrl->hw.ops.map_mdp_regs(pdev, &dsi_ctrl->hw);
+
 	item->ctrl = dsi_ctrl;
 
 	mutex_lock(&dsi_ctrl_list_lock);
@@ -2555,6 +2572,7 @@
 	if ((jiffies_now - dsi_ctrl->jiffies_start) < intr_check_interval) {
 		if (dsi_ctrl->error_interrupt_count > interrupt_threshold) {
 			DSI_CTRL_WARN(dsi_ctrl, "Detected spurious interrupts on dsi ctrl\n");
+			SDE_EVT32_IRQ(dsi_ctrl->error_interrupt_count);
 			return true;
 		}
 	} else {
@@ -2814,7 +2832,7 @@
 			intr_idx >= DSI_STATUS_INTERRUPT_COUNT)
 		return;
 
-	SDE_EVT32(dsi_ctrl->cell_index, SDE_EVTLOG_FUNC_ENTRY);
+	SDE_EVT32(dsi_ctrl->cell_index, SDE_EVTLOG_FUNC_ENTRY, intr_idx);
 	spin_lock_irqsave(&dsi_ctrl->irq_info.irq_lock, flags);
 
 	if (dsi_ctrl->irq_info.irq_stat_refcount[intr_idx] == 0) {
@@ -2847,7 +2865,7 @@
 	if (!dsi_ctrl || intr_idx >= DSI_STATUS_INTERRUPT_COUNT)
 		return;
 
-	SDE_EVT32(dsi_ctrl->cell_index, SDE_EVTLOG_FUNC_ENTRY);
+	SDE_EVT32_IRQ(dsi_ctrl->cell_index, SDE_EVTLOG_FUNC_ENTRY, intr_idx);
 	spin_lock_irqsave(&dsi_ctrl->irq_info.irq_lock, flags);
 
 	if (dsi_ctrl->irq_info.irq_stat_refcount[intr_idx])
@@ -3281,6 +3299,78 @@
 }
 
 /**
+ * dsi_ctrl_mask_overflow() -	API to mask/unmask overflow errors.
+ * @dsi_ctrl:			DSI controller handle.
+ * @enable:			variable to control masking/unmasking.
+ */
+void dsi_ctrl_mask_overflow(struct dsi_ctrl *dsi_ctrl, bool enable)
+{
+	struct dsi_ctrl_hw_ops dsi_hw_ops;
+
+	dsi_hw_ops = dsi_ctrl->hw.ops;
+
+	if (enable) {
+		if (dsi_hw_ops.mask_error_intr)
+			dsi_hw_ops.mask_error_intr(&dsi_ctrl->hw,
+					BIT(DSI_FIFO_OVERFLOW), true);
+	} else {
+		if (dsi_hw_ops.mask_error_intr && !dsi_ctrl->esd_check_underway)
+			dsi_hw_ops.mask_error_intr(&dsi_ctrl->hw,
+					BIT(DSI_FIFO_OVERFLOW), false);
+	}
+}
+
+/**
+ * dsi_ctrl_clear_slave_dma_status() - API to clear slave DMA status.
+ * @dsi_ctrl:                   DSI controller handle.
+ * @flags:                      Modifiers
+ */
+int dsi_ctrl_clear_slave_dma_status(struct dsi_ctrl *dsi_ctrl, u32 flags)
+{
+	struct dsi_ctrl_hw_ops dsi_hw_ops;
+	u32 status;
+	u32 mask = DSI_CMD_MODE_DMA_DONE;
+	int rc = 0, wait_for_done = 5;
+
+	if (!dsi_ctrl) {
+		DSI_CTRL_ERR(dsi_ctrl, "Invalid params\n");
+		return -EINVAL;
+	}
+
+	/* Return if this is not the last command */
+	if (!(flags & DSI_CTRL_CMD_LAST_COMMAND))
+		return rc;
+
+	SDE_EVT32(dsi_ctrl->cell_index, SDE_EVTLOG_FUNC_ENTRY);
+
+	mutex_lock(&dsi_ctrl->ctrl_lock);
+
+	dsi_hw_ops = dsi_ctrl->hw.ops;
+
+	while (wait_for_done > 0) {
+		status = dsi_hw_ops.get_interrupt_status(&dsi_ctrl->hw);
+		if (status & mask) {
+			status |= (DSI_CMD_MODE_DMA_DONE | DSI_BTA_DONE);
+			dsi_hw_ops.clear_interrupt_status(&dsi_ctrl->hw,
+				status);
+			SDE_EVT32(dsi_ctrl->cell_index, status);
+			wait_for_done = 1;
+			break;
+		}
+		udelay(10);
+		wait_for_done--;
+	}
+
+	if (wait_for_done == 0)
+		DSI_CTRL_ERR(dsi_ctrl,
+				"DSI1 CMD_MODE_DMA_DONE failed\n");
+
+	mutex_unlock(&dsi_ctrl->ctrl_lock);
+
+	return rc;
+}
+
+/**
  * dsi_ctrl_cmd_tx_trigger() - Trigger a deferred command.
  * @dsi_ctrl:              DSI controller handle.
  * @flags:                 Modifiers.
@@ -3291,6 +3381,10 @@
 {
 	int rc = 0;
 	struct dsi_ctrl_hw_ops dsi_hw_ops;
+	u32 v_total = 0, fps = 0, cur_line = 0, mem_latency_us = 100;
+	u32 line_time = 0, schedule_line = 0x1, latency_by_line = 0;
+	struct dsi_mode_info *timing;
+	unsigned long flag;
 
 	if (!dsi_ctrl) {
 		DSI_CTRL_ERR(dsi_ctrl, "Invalid params\n");
@@ -3306,22 +3400,60 @@
 
 	mutex_lock(&dsi_ctrl->ctrl_lock);
 
+	timing = &(dsi_ctrl->host_config.video_timing);
+
+	if (timing &&
+		(dsi_ctrl->host_config.panel_mode == DSI_OP_VIDEO_MODE)) {
+		v_total = timing->v_sync_width + timing->v_back_porch +
+			timing->v_front_porch + timing->v_active;
+		fps = timing->refresh_rate;
+		schedule_line = calculate_schedule_line(dsi_ctrl, flags);
+		line_time = (1000000 / fps) / v_total;
+		latency_by_line = CEIL(mem_latency_us, line_time);
+	}
+
 	if (!(flags & DSI_CTRL_CMD_BROADCAST_MASTER))
 		dsi_hw_ops.trigger_command_dma(&dsi_ctrl->hw);
 
 	if ((flags & DSI_CTRL_CMD_BROADCAST) &&
 		(flags & DSI_CTRL_CMD_BROADCAST_MASTER)) {
 		dsi_ctrl_wait_for_video_done(dsi_ctrl);
-		if (dsi_hw_ops.mask_error_intr)
-			dsi_hw_ops.mask_error_intr(&dsi_ctrl->hw,
-					BIT(DSI_FIFO_OVERFLOW), true);
 		atomic_set(&dsi_ctrl->dma_irq_trig, 0);
 		dsi_ctrl_enable_status_interrupt(dsi_ctrl,
 					DSI_SINT_CMD_MODE_DMA_DONE, NULL);
 		reinit_completion(&dsi_ctrl->irq_info.cmd_dma_done);
 
 		/* trigger command */
-		dsi_hw_ops.trigger_command_dma(&dsi_ctrl->hw);
+		if ((dsi_ctrl->host_config.panel_mode == DSI_OP_VIDEO_MODE) &&
+			dsi_hw_ops.schedule_dma_cmd &&
+			(dsi_ctrl->current_state.vid_engine_state ==
+			DSI_CTRL_ENGINE_ON)) {
+			/*
+			 * This change reads the video line count from
+			 * MDP_INTF_LINE_COUNT register and checks whether
+			 * DMA trigger happens close to the schedule line.
+			 * If it is not close to the schedule line, then DMA
+			 * command transfer is triggered.
+			 */
+			while (1) {
+				local_irq_save(flag);
+				cur_line =
+				dsi_hw_ops.log_line_count(&dsi_ctrl->hw,
+					dsi_ctrl->cmd_mode);
+				if (cur_line <
+					(schedule_line - latency_by_line) ||
+					cur_line > (schedule_line + 1)) {
+					dsi_hw_ops.trigger_command_dma(
+						&dsi_ctrl->hw);
+					local_irq_restore(flag);
+					break;
+				}
+				local_irq_restore(flag);
+				udelay(1000);
+			}
+		} else
+			dsi_hw_ops.trigger_command_dma(&dsi_ctrl->hw);
+
 		if (flags & DSI_CTRL_CMD_ASYNC_WAIT) {
 			dsi_ctrl->dma_wait_queued = true;
 			queue_work(dsi_ctrl->dma_cmd_workq,
@@ -3331,11 +3463,6 @@
 			dsi_ctrl_dma_cmd_wait_for_done(&dsi_ctrl->dma_cmd_wait);
 		}
 
-		if (dsi_hw_ops.mask_error_intr &&
-				!dsi_ctrl->esd_check_underway)
-			dsi_hw_ops.mask_error_intr(&dsi_ctrl->hw,
-					BIT(DSI_FIFO_OVERFLOW), false);
-
 		if (flags & DSI_CTRL_CMD_NON_EMBEDDED_MODE) {
 			if (dsi_ctrl->version < DSI_CTRL_VERSION_2_4)
 				dsi_hw_ops.soft_reset(&dsi_ctrl->hw);
@@ -3562,6 +3689,7 @@
 	else
 		dsi_ctrl->hw.ops.ctrl_en(&dsi_ctrl->hw, false);
 
+	SDE_EVT32(dsi_ctrl->cell_index, state);
 	DSI_CTRL_DEBUG(dsi_ctrl, "Set host engine state = %d\n", state);
 	dsi_ctrl_update_state(dsi_ctrl, DSI_CTRL_OP_HOST_ENGINE, state);
 error:
@@ -3601,6 +3729,7 @@
 	else
 		dsi_ctrl->hw.ops.cmd_engine_en(&dsi_ctrl->hw, false);
 
+	SDE_EVT32(dsi_ctrl->cell_index, state);
 	DSI_CTRL_DEBUG(dsi_ctrl, "Set cmd engine state = %d\n", state);
 	dsi_ctrl_update_state(dsi_ctrl, DSI_CTRL_OP_CMD_ENGINE, state);
 error:
@@ -3641,9 +3770,10 @@
 	dsi_ctrl->hw.ops.video_engine_en(&dsi_ctrl->hw, on);
 
 	/* perform a reset when turning off video engine */
-	if (!on)
+	if (!on && dsi_ctrl->version < DSI_CTRL_VERSION_1_3)
 		dsi_ctrl->hw.ops.soft_reset(&dsi_ctrl->hw);
 
+	SDE_EVT32(dsi_ctrl->cell_index, state);
 	DSI_CTRL_DEBUG(dsi_ctrl, "Set video engine state = %d\n", state);
 	dsi_ctrl_update_state(dsi_ctrl, DSI_CTRL_OP_VID_ENGINE, state);
 error:
diff --git a/msm/dsi/dsi_ctrl.h b/msm/dsi/dsi_ctrl.h
index b509cae..3f73ae1 100644
--- a/msm/dsi/dsi_ctrl.h
+++ b/msm/dsi/dsi_ctrl.h
@@ -239,6 +239,8 @@
  *                           insert null packet.
  * @modeupdated:	  Boolean to send new roi if mode is updated.
  * @split_link_supported: Boolean to check if hw supports split link.
+ * @cmd_mode:		Boolean to indicate if panel is running in
+ *			command mode.
  */
 struct dsi_ctrl {
 	struct platform_device *pdev;
@@ -299,6 +301,7 @@
 	bool null_insertion_enabled;
 	bool modeupdated;
 	bool split_link_supported;
+	bool cmd_mode;
 };
 
 /**
@@ -861,4 +864,18 @@
  * @dsi_ctrl:                      DSI controller handle.
  */
 int dsi_ctrl_wait4dynamic_refresh_done(struct dsi_ctrl *ctrl);
+
+/**
+ * dsi_ctrl_mask_overflow() -	API to mask/unmask overflow errors.
+ * @dsi_ctrl:			DSI controller handle.
+ * @enable:			variable to control masking/unmasking.
+ */
+void dsi_ctrl_mask_overflow(struct dsi_ctrl *dsi_ctrl, bool enable);
+
+/**
+ * dsi_ctrl_clear_slave_dma_status() - API to clear slave DMA status.
+ * @dsi_ctrl:                   DSI controller handle.
+ * @flags:                      Modifiers
+ */
+int dsi_ctrl_clear_slave_dma_status(struct dsi_ctrl *dsi_ctrl, u32 flags);
 #endif /* _DSI_CTRL_H_ */
diff --git a/msm/dsi/dsi_ctrl_hw.h b/msm/dsi/dsi_ctrl_hw.h
index 2f55192..a6c7f2f 100644
--- a/msm/dsi/dsi_ctrl_hw.h
+++ b/msm/dsi/dsi_ctrl_hw.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2020, The Linux Foundation. All rights reserved.
  */
 
 #ifndef _DSI_CTRL_HW_H_
@@ -31,6 +31,7 @@
 /**
  * enum dsi_ctrl_version - version of the dsi host controller
  * @DSI_CTRL_VERSION_UNKNOWN: Unknown controller version
+ * @DSI_CTRL_VERSION_1_3:     DSI host v1.3 controller
  * @DSI_CTRL_VERSION_1_4:     DSI host v1.4 controller
  * @DSI_CTRL_VERSION_2_0:     DSI host v2.0 controller
  * @DSI_CTRL_VERSION_2_2:     DSI host v2.2 controller
@@ -40,6 +41,7 @@
  */
 enum dsi_ctrl_version {
 	DSI_CTRL_VERSION_UNKNOWN,
+	DSI_CTRL_VERSION_1_3,
 	DSI_CTRL_VERSION_1_4,
 	DSI_CTRL_VERSION_2_0,
 	DSI_CTRL_VERSION_2_2,
@@ -836,6 +838,22 @@
 	 * @sel_phy:	Bool to control whether to select phy or controller
 	 */
 	void (*hs_req_sel)(struct dsi_ctrl_hw *ctrl, bool sel_phy);
+
+	/**
+	 * hw.ops.map_mdp_regs() - maps MDP interface line count registers.
+	 * @pdev:	Pointer to platform device.
+	 * @ctrl:	Pointer to the controller host hardware.
+	 */
+	int (*map_mdp_regs)(struct platform_device *pdev,
+			struct dsi_ctrl_hw *ctrl);
+
+	/**
+	 * hw.ops.log_line_count() - reads the MDP interface line count
+	 *							registers.
+	 * @ctrl:	Pointer to the controller host hardware.
+	 * @cmd_mode:	Boolean to indicate command mode operation.
+	 */
+	u32 (*log_line_count)(struct dsi_ctrl_hw *ctrl, bool cmd_mode);
 };
 
 /*
@@ -846,6 +864,13 @@
  * @mmss_misc_length:       Length of mmss_misc register map.
  * @disp_cc_base:           Base address of disp_cc register map.
  * @disp_cc_length:         Length of disp_cc register map.
+ * @te_rd_ptr_reg:          Address of MDP_TEAR_INTF_TEAR_LINE_COUNT. This
+ *                          register is used for testing and validating the RD
+ *                          ptr value when a CMD is triggered and it succeeds.
+ * @line_count_reg:         Address of MDP_TEAR_INTF_LINE_COUNT. This
+ *                          register is used for testing and validating the
+ *                          line count value when a CMD is triggered and it
+ *                          succeeds.
  * @index:                  Instance ID of the controller.
  * @feature_map:            Features supported by the DSI controller.
  * @ops:                    Function pointers to the operations supported by the
@@ -863,6 +888,8 @@
 	void __iomem *mmss_misc_base;
 	u32 mmss_misc_length;
 	void __iomem *disp_cc_base;
+	void __iomem *te_rd_ptr_reg;
+	void __iomem *line_count_reg;
 	u32 disp_cc_length;
 	u32 index;
 
diff --git a/msm/dsi/dsi_ctrl_hw_2_2.c b/msm/dsi/dsi_ctrl_hw_2_2.c
index 0c6d340..56821e8 100644
--- a/msm/dsi/dsi_ctrl_hw_2_2.c
+++ b/msm/dsi/dsi_ctrl_hw_2_2.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
  */
 
 #include "dsi_ctrl_hw.h"
@@ -13,6 +13,10 @@
 /* register to configure DMA scheduling */
 #define DSI_DMA_SCHEDULE_CTRL 0x100
 
+/* MDP INTF registers to be mapped */
+#define MDP_INTF1_TEAR_LINE_COUNT 0xAE6BAB0
+#define MDP_INTF1_LINE_COUNT 0xAE6B8B0
+
 /**
  * dsi_ctrl_hw_22_phy_reset_config() - to configure clamp control during ulps
  * @ctrl:          Pointer to the controller host hardware.
@@ -126,3 +130,61 @@
 
 	DSI_DISP_CC_W32(ctrl, DISP_CC_MISC_CMD_REG_OFF, reg);
 }
+
+/**
+ * dsi_ctrl_hw_22_map_mdp_regs() - maps MDP interface line count registers.
+ * @pdev:		Pointer to platform device.
+ * @ctrl:		Pointer to the controller host hardware.
+ *
+ * Return: 0 on success and error on failure.
+ */
+int dsi_ctrl_hw_22_map_mdp_regs(struct platform_device *pdev,
+			struct dsi_ctrl_hw *ctrl)
+{
+	int rc = 0;
+	void __iomem *ptr = NULL, *ptr1 = NULL;
+
+	if (ctrl->index == 0) {
+		ptr = devm_ioremap(&pdev->dev, MDP_INTF1_TEAR_LINE_COUNT, 1);
+		if (IS_ERR_OR_NULL(ptr)) {
+			DSI_CTRL_HW_ERR(ctrl,
+				"MDP TE LINE COUNT address not found\n");
+			rc = PTR_ERR(ptr);
+			return rc;
+		}
+
+		ptr1 = devm_ioremap(&pdev->dev, MDP_INTF1_LINE_COUNT, 1);
+		if (IS_ERR_OR_NULL(ptr1)) {
+			DSI_CTRL_HW_ERR(ctrl,
+				"MDP LINE COUNT address not found\n");
+			rc = PTR_ERR(ptr1);
+			return rc;
+		}
+	}
+
+	ctrl->te_rd_ptr_reg = ptr;
+	ctrl->line_count_reg = ptr1;
+
+	return rc;
+}
+
+/**
+ * dsi_ctrl_hw_22_log_line_count() - reads the MDP interface line count
+ *					registers.
+ * @ctrl:	Pointer to the controller host hardware.
+ * @cmd_mode:	Boolean to indicate command mode operation.
+ *
+ * Return: INTF register value.
+ */
+u32 dsi_ctrl_hw_22_log_line_count(struct dsi_ctrl_hw *ctrl, bool cmd_mode)
+{
+
+	u32 reg = 0;
+
+	if (cmd_mode && ctrl->te_rd_ptr_reg)
+		reg = readl_relaxed(ctrl->te_rd_ptr_reg);
+	else if (ctrl->line_count_reg)
+		reg = readl_relaxed(ctrl->line_count_reg);
+
+	return reg;
+}
diff --git a/msm/dsi/dsi_ctrl_hw_cmn.c b/msm/dsi/dsi_ctrl_hw_cmn.c
index 4177d28..e3e715c 100644
--- a/msm/dsi/dsi_ctrl_hw_cmn.c
+++ b/msm/dsi/dsi_ctrl_hw_cmn.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2020, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/delay.h>
@@ -92,6 +92,14 @@
 	dsi_setup_trigger_controls(ctrl, cfg);
 	dsi_split_link_setup(ctrl, cfg);
 
+	/* Setup T_CLK_PRE extend register */
+	reg_value = DSI_R32(ctrl, DSI_TEST_PATTERN_GEN_VIDEO_ENABLE);
+	if (cfg->t_clk_pre_extend)
+		reg_value |= BIT(0);
+	else
+		reg_value &= ~BIT(0);
+	DSI_W32(ctrl, DSI_TEST_PATTERN_GEN_VIDEO_ENABLE, reg_value);
+
 	/* Setup clocking timing controls */
 	reg_value = ((cfg->t_clk_post & 0x3F) << 8);
 	reg_value |= (cfg->t_clk_pre & 0x3F);
@@ -181,6 +189,7 @@
 	DSI_W32(ctrl, DSI_CTRL, reg_ctrl);
 	wmb(); /* make sure DSI controller is enabled again */
 	DSI_CTRL_HW_DBG(ctrl, "ctrl soft reset done\n");
+	SDE_EVT32(ctrl->index);
 }
 
 /**
@@ -769,7 +778,6 @@
 void dsi_ctrl_hw_cmn_trigger_command_dma(struct dsi_ctrl_hw *ctrl)
 {
 	DSI_W32(ctrl, DSI_CMD_MODE_DMA_SW_TRIGGER, 0x1);
-	DSI_CTRL_HW_DBG(ctrl, "CMD DMA triggered\n");
 }
 
 /**
diff --git a/msm/dsi/dsi_defs.h b/msm/dsi/dsi_defs.h
index b1c3857..30fb220 100644
--- a/msm/dsi/dsi_defs.h
+++ b/msm/dsi/dsi_defs.h
@@ -480,6 +480,8 @@
  * @t_clk_pre:           Number of byte clock cycles that the high speed clock
  *                       shall be driven prior to data lane transitions from LP
  *                       to HS mode.
+ * @t_clk_pre_extend:    Increment t_clk_pre counter by 2 byteclk if set to
+ *                       true, otherwise increment by 1 byteclk.
  * @ignore_rx_eot:       Ignore Rx EOT packets if set to true.
  * @append_tx_eot:       Append EOT packets for forward transmissions if set to
  *                       true.
@@ -506,6 +508,7 @@
 	bool bit_swap_blue;
 	u32 t_clk_post;
 	u32 t_clk_pre;
+	bool t_clk_pre_extend;
 	bool ignore_rx_eot;
 	bool append_tx_eot;
 	bool ext_bridge_mode;
@@ -568,7 +571,7 @@
  * @common_config:         Host configuration common to both Video and Cmd mode.
  * @video_engine:          Video engine configuration if panel is in video mode.
  * @cmd_engine:            Cmd engine configuration if panel is in cmd mode.
- * @esc_clk_rate_khz:      Esc clock frequency in Hz.
+ * @esc_clk_rate_hz:       Esc clock frequency in Hz.
  * @bit_clk_rate_hz:       Bit clock frequency in Hz.
  * @bit_clk_rate_hz_override: DSI bit clk rate override from dt/sysfs.
  * @video_timing:          Video timing information of a frame.
diff --git a/msm/dsi/dsi_display.c b/msm/dsi/dsi_display.c
index 54f07ed..2079103 100644
--- a/msm/dsi/dsi_display.c
+++ b/msm/dsi/dsi_display.c
@@ -795,7 +795,7 @@
 		rc = -EINVAL;
 		goto release_panel_lock;
 	}
-	SDE_EVT32(SDE_EVTLOG_FUNC_ENTRY);
+	SDE_EVT32(SDE_EVTLOG_FUNC_ENTRY, status_mode, te_check_override);
 
 	if (te_check_override && gpio_is_valid(dsi_display->disp_te_gpio))
 		status_mode = ESD_MODE_PANEL_TE;
@@ -843,7 +843,7 @@
 
 release_panel_lock:
 	dsi_panel_release_panel_lock(panel);
-	SDE_EVT32(SDE_EVTLOG_FUNC_EXIT);
+	SDE_EVT32(SDE_EVTLOG_FUNC_EXIT, rc);
 
 	return rc;
 }
@@ -868,6 +868,9 @@
 		return -EINVAL;
 	}
 
+	if (cmd->last_command)
+		cmd->msg.flags |= MIPI_DSI_MSG_LASTCOMMAND;
+
 	for (i = 0; i < cmd->msg.tx_len; i++)
 		payload[i] = cmd_buf[7 + i];
 
@@ -1049,6 +1052,7 @@
 		return rc;
 	}
 
+	SDE_EVT32(display->panel->power_mode, power_mode, rc);
 	DSI_DEBUG("Power mode transition from %d to %d %s",
 			display->panel->power_mode, power_mode,
 			rc ? "failed" : "successful");
@@ -2681,6 +2685,23 @@
 	return 0;
 }
 
+static void dsi_display_mask_overflow(struct dsi_display *display, u32 flags,
+						bool enable)
+{
+	struct dsi_display_ctrl *ctrl;
+	int i;
+
+	if (!(flags & DSI_CTRL_CMD_LAST_COMMAND))
+		return;
+
+	display_for_each_ctrl(i, display) {
+		ctrl = &display->ctrl[i];
+		if (!ctrl)
+			continue;
+		dsi_ctrl_mask_overflow(ctrl->ctrl, enable);
+	}
+}
+
 static int dsi_display_broadcast_cmd(struct dsi_display *display,
 				     const struct mipi_dsi_msg *msg)
 {
@@ -2710,6 +2731,7 @@
 	 * 2. Trigger commands
 	 */
 	m_ctrl = &display->ctrl[display->cmd_master_idx];
+	dsi_display_mask_overflow(display, m_flags, true);
 	rc = dsi_ctrl_cmd_transfer(m_ctrl->ctrl, msg, &m_flags);
 	if (rc) {
 		DSI_ERR("[%s] cmd transfer failed on master,rc=%d\n",
@@ -2744,7 +2766,21 @@
 		goto error;
 	}
 
+	display_for_each_ctrl(i, display) {
+		ctrl = &display->ctrl[i];
+		if (ctrl == m_ctrl)
+			continue;
+
+		rc = dsi_ctrl_clear_slave_dma_status(ctrl->ctrl, flags);
+		if (rc) {
+			DSI_ERR("[%s] clear interrupt status failed, rc=%d\n",
+				display->name, rc);
+			goto error;
+		}
+	}
+
 error:
+	dsi_display_mask_overflow(display, m_flags, false);
 	return rc;
 }
 
@@ -3006,11 +3042,15 @@
 	const char *mux_byte = "mux_byte", *mux_pixel = "mux_pixel";
 	const char *cphy_byte = "cphy_byte", *cphy_pixel = "cphy_pixel";
 	const char *shadow_byte = "shadow_byte", *shadow_pixel = "shadow_pixel";
+	const char *shadow_cphybyte = "shadow_cphybyte",
+		   *shadow_cphypixel = "shadow_cphypixel";
 	struct clk *dsi_clk;
 	struct dsi_clk_link_set *src = &display->clock_info.src_clks;
 	struct dsi_clk_link_set *mux = &display->clock_info.mux_clks;
 	struct dsi_clk_link_set *cphy = &display->clock_info.cphy_clks;
 	struct dsi_clk_link_set *shadow = &display->clock_info.shadow_clks;
+	struct dsi_clk_link_set *shadow_cphy =
+				&display->clock_info.shadow_cphy_clks;
 	struct dsi_dyn_clk_caps *dyn_clk_caps = &(display->panel->dyn_clk_caps);
 	char *dsi_clock_name;
 
@@ -3069,6 +3109,12 @@
 				if (dsi_display_check_prefix(shadow_pixel,
 							clk_name))
 					shadow->pixel_clk = NULL;
+				if (dsi_display_check_prefix(shadow_cphybyte,
+							clk_name))
+					shadow_cphy->byte_clk = NULL;
+				if (dsi_display_check_prefix(shadow_cphypixel,
+							clk_name))
+					shadow_cphy->pixel_clk = NULL;
 
 				dyn_clk_caps->dyn_clk_support = false;
 			}
@@ -3113,6 +3159,16 @@
 			shadow->pixel_clk = dsi_clk;
 			continue;
 		}
+
+		if (dsi_display_check_prefix(shadow_cphybyte, clk_name)) {
+			shadow_cphy->byte_clk = dsi_clk;
+			continue;
+		}
+
+		if (dsi_display_check_prefix(shadow_cphypixel, clk_name)) {
+			shadow_cphy->pixel_clk = dsi_clk;
+			continue;
+		}
 	}
 
 	return 0;
@@ -3967,12 +4023,12 @@
 			byte_intf_clk_div = host_cfg->byte_intf_clk_div;
 			do_div(byte_intf_clk_rate, byte_intf_clk_div);
 		} else {
-			do_div(bit_rate, bits_per_symbol);
-			bit_rate *= num_of_symbols;
-			bit_rate_per_lane = bit_rate;
-			do_div(bit_rate_per_lane, num_of_lanes);
-			byte_clk_rate = bit_rate_per_lane;
-			do_div(byte_clk_rate, 7);
+			bit_rate_per_lane = bit_clk_rate;
+			pclk_rate *= bits_per_symbol;
+			do_div(pclk_rate, num_of_symbols);
+			byte_clk_rate = bit_clk_rate;
+			do_div(byte_clk_rate, num_of_symbols);
+
 			/* For CPHY, byte_intf_clk is same as byte_clk */
 			byte_intf_clk_rate = byte_clk_rate;
 		}
@@ -3982,6 +4038,7 @@
 		DSI_DEBUG("byte_clk_rate = %llu, byte_intf_clk_rate = %llu\n",
 			  byte_clk_rate, byte_intf_clk_rate);
 		DSI_DEBUG("pclk_rate = %llu\n", pclk_rate);
+		SDE_EVT32(i, bit_rate, byte_clk_rate, pclk_rate);
 
 		ctrl->clk_freq.byte_clk_rate = byte_clk_rate;
 		ctrl->clk_freq.byte_intf_clk_rate = byte_intf_clk_rate;
@@ -4015,18 +4072,19 @@
 	struct dsi_display_ctrl *m_ctrl;
 	struct dsi_ctrl *dsi_ctrl;
 	struct dsi_phy_cfg *cfg;
+	int phy_ver;
 
 	m_ctrl = &display->ctrl[display->clk_master_idx];
 	dsi_ctrl = m_ctrl->ctrl;
 
 	cfg = &(m_ctrl->phy->cfg);
 
-	esc_clk_rate_hz = dsi_ctrl->clk_freq.esc_clk_rate * 1000;
-	pclk_to_esc_ratio = ((dsi_ctrl->clk_freq.pix_clk_rate * 1000) /
+	esc_clk_rate_hz = dsi_ctrl->clk_freq.esc_clk_rate;
+	pclk_to_esc_ratio = (dsi_ctrl->clk_freq.pix_clk_rate /
 			     esc_clk_rate_hz);
-	byte_to_esc_ratio = ((dsi_ctrl->clk_freq.byte_clk_rate * 1000) /
+	byte_to_esc_ratio = (dsi_ctrl->clk_freq.byte_clk_rate /
 			     esc_clk_rate_hz);
-	hr_bit_to_esc_ratio = ((dsi_ctrl->clk_freq.byte_clk_rate * 4 * 1000) /
+	hr_bit_to_esc_ratio = ((dsi_ctrl->clk_freq.byte_clk_rate * 4) /
 					esc_clk_rate_hz);
 
 	hsync_period = DSI_H_TOTAL_DSC(&mode->timing);
@@ -4052,8 +4110,28 @@
 			  ((cfg->timing.lane_v3[4] >> 1) + 1)) /
 			 hr_bit_to_esc_ratio);
 
-	/* 130 us pll delay recommended by h/w doc */
-	delay->pll_delay = ((130 * esc_clk_rate_hz) / 1000000) * 2;
+	/*
+	 * 100us pll delay recommended for phy ver 2.0 and 3.0
+	 * 25us pll delay recommended for phy ver 4.0
+	 */
+	phy_ver = dsi_phy_get_version(m_ctrl->phy);
+	if (phy_ver <= DSI_PHY_VERSION_3_0)
+		delay->pll_delay = 100;
+	else
+		delay->pll_delay = 25;
+
+	delay->pll_delay = ((delay->pll_delay * esc_clk_rate_hz) / 1000000);
+}
+
+/*
+ * dsi_display_is_type_cphy - check if panel type is cphy
+ * @display: Pointer to private display structure
+ * Returns: True if panel type is cphy
+ */
+static inline bool dsi_display_is_type_cphy(struct dsi_display *display)
+{
+	return (display->panel->host_config.phy_type ==
+		DSI_PHY_TYPE_CPHY) ? true : false;
 }
 
 static int _dsi_display_dyn_update_clks(struct dsi_display *display,
@@ -4061,15 +4139,24 @@
 {
 	int rc = 0, i;
 	struct dsi_display_ctrl *m_ctrl, *ctrl;
+	struct dsi_clk_link_set *parent_clk, *enable_clk;
 
 	m_ctrl = &display->ctrl[display->clk_master_idx];
 
-	dsi_clk_prepare_enable(&display->clock_info.src_clks);
+	if (dsi_display_is_type_cphy(display)) {
+		enable_clk = &display->clock_info.cphy_clks;
+		parent_clk = &display->clock_info.shadow_cphy_clks;
+	} else {
+		enable_clk = &display->clock_info.src_clks;
+		parent_clk = &display->clock_info.shadow_clks;
+	}
 
-	rc = dsi_clk_update_parent(&display->clock_info.shadow_clks,
-			      &display->clock_info.mux_clks);
+	dsi_clk_prepare_enable(enable_clk);
+
+	rc = dsi_clk_update_parent(parent_clk,
+				&display->clock_info.mux_clks);
 	if (rc) {
-		DSI_ERR("failed update mux parent to shadow\n");
+		DSI_ERR("failed to update mux parent\n");
 		goto exit;
 	}
 
@@ -4118,12 +4205,13 @@
 		dsi_phy_dynamic_refresh_clear(ctrl->phy);
 	}
 
-	rc = dsi_clk_update_parent(&display->clock_info.src_clks,
-			      &display->clock_info.mux_clks);
+	rc = dsi_clk_update_parent(enable_clk,
+				&display->clock_info.mux_clks);
+
 	if (rc)
 		DSI_ERR("could not switch back to src clks %d\n", rc);
 
-	dsi_clk_disable_unprepare(&display->clock_info.src_clks);
+	dsi_clk_disable_unprepare(enable_clk);
 
 	return rc;
 
@@ -4441,6 +4529,9 @@
 				DSI_V_TOTAL(timing),
 				timing->v_front_porch,
 				&adj_mode->timing.v_front_porch);
+		SDE_EVT32(SDE_EVTLOG_FUNC_CASE1, DSI_DFPS_IMMEDIATE_VFP,
+			curr_refresh_rate, timing->refresh_rate,
+			timing->v_front_porch, adj_mode->timing.v_front_porch);
 		break;
 
 	case DSI_DFPS_IMMEDIATE_HFP:
@@ -4451,6 +4542,9 @@
 				DSI_H_TOTAL_DSC(timing),
 				timing->h_front_porch,
 				&adj_mode->timing.h_front_porch);
+		SDE_EVT32(SDE_EVTLOG_FUNC_CASE2, DSI_DFPS_IMMEDIATE_HFP,
+			curr_refresh_rate, timing->refresh_rate,
+			timing->h_front_porch, adj_mode->timing.h_front_porch);
 		if (!rc)
 			adj_mode->timing.h_front_porch *= display->ctrl_count;
 		break;
@@ -4503,7 +4597,7 @@
 		return -EINVAL;
 	}
 
-	SDE_EVT32(mode->dsi_mode_flags);
+	SDE_EVT32(mode->dsi_mode_flags, mode->panel_mode);
 	if (mode->dsi_mode_flags & DSI_MODE_FLAG_POMS) {
 		display->config.panel_mode = mode->panel_mode;
 		display->panel->panel_mode = mode->panel_mode;
@@ -4629,7 +4723,7 @@
 		return -EINVAL;
 	}
 
-	if (!display->panel_node)
+	if (!display->panel_node && !display->fw)
 		return 0;
 
 	mutex_lock(&display->display_lock);
@@ -4714,6 +4808,7 @@
 	/* Update splash status for clock manager */
 	dsi_display_clk_mngr_update_splash_status(display->clk_mngr,
 				display->is_cont_splash_enabled);
+	SDE_EVT32(SDE_EVTLOG_FUNC_ENTRY, display->is_cont_splash_enabled);
 
 	/* Set up ctrl isr before enabling core clk */
 	dsi_display_ctrl_isr_configure(display, true);
@@ -4782,6 +4877,7 @@
 	dsi_display_clk_mngr_update_splash_status(display->clk_mngr,
 				display->is_cont_splash_enabled);
 
+	SDE_EVT32(SDE_EVTLOG_FUNC_EXIT, display->is_cont_splash_enabled);
 	return rc;
 }
 
@@ -4875,7 +4971,7 @@
 				drm, display);
 		return -EINVAL;
 	}
-	if (!display->panel_node)
+	if (!display->panel_node && !display->fw)
 		return 0;
 
 	if (!display->fw)
@@ -5170,11 +5266,19 @@
 	struct dsi_display *display = context;
 
 	if (fw) {
-		DSI_DEBUG("reading data from firmware, size=%zd\n",
+		DSI_INFO("reading data from firmware, size=%zd\n",
 			fw->size);
 
 		display->fw = fw;
-		display->name = "dsi_firmware_display";
+
+		if (!strcmp(display->display_type, "primary"))
+			display->name = "dsi_firmware_display";
+
+		else if (!strcmp(display->display_type, "secondary"))
+			display->name = "dsi_firmware_display_secondary";
+
+	} else {
+		DSI_INFO("no firmware available, falling back to device node\n");
 	}
 
 	if (dsi_display_init(display))
@@ -5238,12 +5342,6 @@
 				"qcom,dsi-default-panel", 0);
 		if (!panel_node)
 			DSI_WARN("default panel not found\n");
-
-		if (IS_ENABLED(CONFIG_DSI_PARSER))
-			firm_req = !request_firmware_nowait(
-				THIS_MODULE, 1, "dsi_prop",
-				&pdev->dev, GFP_KERNEL, display,
-				dsi_display_firmware_display);
 	}
 
 	boot_disp->node = pdev->dev.of_node;
@@ -5258,6 +5356,20 @@
 	platform_set_drvdata(pdev, display);
 
 	/* initialize display in firmware callback */
+	if (!boot_disp->boot_disp_en && IS_ENABLED(CONFIG_DSI_PARSER)) {
+		if (!strcmp(display->display_type, "primary"))
+			firm_req = !request_firmware_nowait(
+				THIS_MODULE, 1, "dsi_prop",
+				&pdev->dev, GFP_KERNEL, display,
+				dsi_display_firmware_display);
+
+		else if (!strcmp(display->display_type, "secondary"))
+			firm_req = !request_firmware_nowait(
+				THIS_MODULE, 1, "dsi_prop_sec",
+				&pdev->dev, GFP_KERNEL, display,
+				dsi_display_firmware_display);
+	}
+
 	if (!firm_req) {
 		rc = dsi_display_init(display);
 		if (rc)
@@ -5314,7 +5426,8 @@
 	for (i = 0; i < MAX_DSI_ACTIVE_DISPLAY; i++) {
 		struct dsi_display *display = boot_displays[i].disp;
 
-		if (display && display->panel_node)
+		if ((display && display->panel_node) ||
+					(display && display->fw))
 			count++;
 	}
 
@@ -5333,7 +5446,8 @@
 	for (index = 0; index < MAX_DSI_ACTIVE_DISPLAY; index++) {
 		struct dsi_display *display = boot_displays[index].disp;
 
-		if (display && display->panel_node)
+		if ((display && display->panel_node) ||
+					(display && display->fw))
 			display_array[count++] = display;
 	}
 
@@ -5937,12 +6051,15 @@
 	return 0;
 }
 
-void dsi_display_adjust_mode_timing(
-			struct dsi_dyn_clk_caps *dyn_clk_caps,
+void dsi_display_adjust_mode_timing(struct dsi_display *display,
 			struct dsi_display_mode *dsi_mode,
 			int lanes, int bpp)
 {
 	u64 new_htotal, new_vtotal, htotal, vtotal, old_htotal, div;
+	struct dsi_dyn_clk_caps *dyn_clk_caps;
+	u32 bits_per_symbol = 16, num_of_symbols = 7; /* For Cphy */
+
+	dyn_clk_caps = &(display->panel->dyn_clk_caps);
 
 	/* Constant FPS is not supported on command mode */
 	if (dsi_mode->panel_mode == DSI_OP_CMD_MODE)
@@ -5961,21 +6078,31 @@
 	case DSI_DYN_CLK_TYPE_CONST_FPS_ADJUST_HFP:
 		vtotal = DSI_V_TOTAL(&dsi_mode->timing);
 		old_htotal = DSI_H_TOTAL_DSC(&dsi_mode->timing);
+		do_div(old_htotal, display->ctrl_count);
 		new_htotal = dsi_mode->timing.clk_rate_hz * lanes;
 		div = bpp * vtotal * dsi_mode->timing.refresh_rate;
+		if (dsi_display_is_type_cphy(display)) {
+			new_htotal = new_htotal * bits_per_symbol;
+			div = div * num_of_symbols;
+		}
 		do_div(new_htotal, div);
 		if (old_htotal > new_htotal)
 			dsi_mode->timing.h_front_porch -=
-					(old_htotal - new_htotal);
+			((old_htotal - new_htotal) * display->ctrl_count);
 		else
 			dsi_mode->timing.h_front_porch +=
-					(new_htotal - old_htotal);
+			((new_htotal - old_htotal) * display->ctrl_count);
 		break;
 
 	case DSI_DYN_CLK_TYPE_CONST_FPS_ADJUST_VFP:
 		htotal = DSI_H_TOTAL_DSC(&dsi_mode->timing);
+		do_div(htotal, display->ctrl_count);
 		new_vtotal = dsi_mode->timing.clk_rate_hz * lanes;
 		div = bpp * htotal * dsi_mode->timing.refresh_rate;
+		if (dsi_display_is_type_cphy(display)) {
+			new_vtotal = new_vtotal * bits_per_symbol;
+			div = div * num_of_symbols;
+		}
 		do_div(new_vtotal, div);
 		dsi_mode->timing.v_front_porch = new_vtotal -
 				dsi_mode->timing.v_back_porch -
@@ -6028,7 +6155,7 @@
 		 */
 		src->timing.clk_rate_hz = dyn_clk_caps->bit_clk_list[0];
 
-		dsi_display_adjust_mode_timing(dyn_clk_caps, src, lanes, bpp);
+		dsi_display_adjust_mode_timing(display, src, lanes, bpp);
 
 		src->pixel_clk_khz =
 			div_u64(src->timing.clk_rate_hz * lanes, bpp);
@@ -6050,7 +6177,7 @@
 			memcpy(dst, src, sizeof(struct dsi_display_mode));
 			dst->timing.clk_rate_hz = dyn_clk_caps->bit_clk_list[i];
 
-			dsi_display_adjust_mode_timing(dyn_clk_caps, dst, lanes,
+			dsi_display_adjust_mode_timing(display, dst, lanes,
 									bpp);
 
 			dst->pixel_clk_khz =
@@ -6440,10 +6567,13 @@
 				dyn_clk_caps->maintain_const_fps) {
 				DSI_DEBUG("Mode switch is seamless variable refresh\n");
 				adj_mode->dsi_mode_flags |= DSI_MODE_FLAG_VRR;
-				SDE_EVT32(cur_mode->timing.refresh_rate,
+				SDE_EVT32(SDE_EVTLOG_FUNC_CASE1,
+					cur_mode->timing.refresh_rate,
 					adj_mode->timing.refresh_rate,
 					cur_mode->timing.h_front_porch,
-					adj_mode->timing.h_front_porch);
+					adj_mode->timing.h_front_porch,
+					cur_mode->timing.v_front_porch,
+					adj_mode->timing.v_front_porch);
 			} else {
 				DSI_DEBUG("Switching to %d FPS with mode switch\n",
 					adj_mode->timing.refresh_rate);
@@ -6466,8 +6596,9 @@
 
 				adj_mode->dsi_mode_flags |=
 						DSI_MODE_FLAG_DYN_CLK;
-				SDE_EVT32(cur_mode->pixel_clk_khz,
-						adj_mode->pixel_clk_khz);
+				SDE_EVT32(SDE_EVTLOG_FUNC_CASE2,
+					cur_mode->pixel_clk_khz,
+					adj_mode->pixel_clk_khz);
 			}
 		}
 	}
diff --git a/msm/dsi/dsi_display.h b/msm/dsi/dsi_display.h
index e2c73c6..742f55c 100644
--- a/msm/dsi/dsi_display.h
+++ b/msm/dsi/dsi_display.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2020, The Linux Foundation. All rights reserved.
  */
 
 #ifndef _DSI_DISPLAY_H_
@@ -106,13 +106,15 @@
  * struct dsi_display_clk_info - dsi display clock source information
  * @src_clks:          Source clocks for DSI display.
  * @mux_clks:          Mux clocks used for DFPS.
- * @shadow_clks:       Used for DFPS.
+ * @shadow_clks:       Used for D-phy clock switch.
+ * @shadow_cphy_clks:  Used for C-phy clock switch.
  */
 struct dsi_display_clk_info {
 	struct dsi_clk_link_set src_clks;
 	struct dsi_clk_link_set mux_clks;
 	struct dsi_clk_link_set cphy_clks;
 	struct dsi_clk_link_set shadow_clks;
+	struct dsi_clk_link_set shadow_cphy_clks;
 };
 
 /**
diff --git a/msm/dsi/dsi_drm.c b/msm/dsi/dsi_drm.c
index 584b37b..68d01b0 100644
--- a/msm/dsi/dsi_drm.c
+++ b/msm/dsi/dsi_drm.c
@@ -11,6 +11,7 @@
 #include "sde_connector.h"
 #include "dsi_drm.h"
 #include "sde_trace.h"
+#include "sde_dbg.h"
 
 #define to_dsi_bridge(x)     container_of((x), struct dsi_bridge, base)
 #define to_dsi_state(x)      container_of((x), struct dsi_connector_state, base)
@@ -415,16 +416,32 @@
 		if ((dsi_mode.panel_mode != cur_dsi_mode.panel_mode) &&
 			(!(dsi_mode.dsi_mode_flags & DSI_MODE_FLAG_VRR)) &&
 			(crtc_state->enable ==
-				crtc_state->crtc->state->enable))
+				crtc_state->crtc->state->enable)) {
 			dsi_mode.dsi_mode_flags |= DSI_MODE_FLAG_POMS;
+
+			SDE_EVT32(SDE_EVTLOG_FUNC_CASE1,
+				dsi_mode.timing.h_active,
+				dsi_mode.timing.v_active,
+				dsi_mode.timing.refresh_rate,
+				dsi_mode.pixel_clk_khz,
+				dsi_mode.panel_mode);
+		}
 		/* No DMS/VRR when drm pipeline is changing */
 		if (!drm_mode_equal(cur_mode, adjusted_mode) &&
 			(!(dsi_mode.dsi_mode_flags & DSI_MODE_FLAG_VRR)) &&
 			(!(dsi_mode.dsi_mode_flags & DSI_MODE_FLAG_POMS)) &&
 			(!(dsi_mode.dsi_mode_flags & DSI_MODE_FLAG_DYN_CLK)) &&
 			(!crtc_state->active_changed ||
-			 display->is_cont_splash_enabled))
+			 display->is_cont_splash_enabled)) {
 			dsi_mode.dsi_mode_flags |= DSI_MODE_FLAG_DMS;
+
+			SDE_EVT32(SDE_EVTLOG_FUNC_CASE2,
+				dsi_mode.timing.h_active,
+				dsi_mode.timing.v_active,
+				dsi_mode.timing.refresh_rate,
+				dsi_mode.pixel_clk_khz,
+				dsi_mode.panel_mode);
+		}
 	}
 
 	/* Reject seamless transition when active changed */
diff --git a/msm/dsi/dsi_panel.c b/msm/dsi/dsi_panel.c
index cae05e5..f5cd6e7 100644
--- a/msm/dsi/dsi_panel.c
+++ b/msm/dsi/dsi_panel.c
@@ -13,6 +13,7 @@
 #include "dsi_panel.h"
 #include "dsi_ctrl_hw.h"
 #include "dsi_parser.h"
+#include "sde_dbg.h"
 
 /**
  * topology is currently defined by a set of following 3 values:
@@ -352,6 +353,7 @@
 
 	if (gpio_is_valid(r_config->reset_gpio)) {
 		gpio_set_value(r_config->reset_gpio, 0);
+		SDE_EVT32(SDE_EVTLOG_FUNC_CASE1);
 		DSI_INFO("GPIO pulled low to simulate ESD\n");
 		return 0;
 	}
@@ -491,7 +493,8 @@
 	if (gpio_is_valid(panel->reset_config.disp_en_gpio))
 		gpio_set_value(panel->reset_config.disp_en_gpio, 0);
 
-	if (gpio_is_valid(panel->reset_config.reset_gpio))
+	if (gpio_is_valid(panel->reset_config.reset_gpio) &&
+					!panel->reset_gpio_always_on)
 		gpio_set_value(panel->reset_config.reset_gpio, 0);
 
 	if (gpio_is_valid(panel->reset_config.lcd_mode_sel_gpio))
@@ -945,6 +948,9 @@
 		DSI_DEBUG("[%s] t_clk_pre = %d\n", name, val);
 	}
 
+	host->t_clk_pre_extend = utils->read_bool(utils->data,
+						"qcom,mdss-dsi-t-clk-pre-extend");
+
 	host->ignore_rx_eot = utils->read_bool(utils->data,
 						"qcom,mdss-dsi-rx-eot-ignore");
 
@@ -2217,6 +2223,9 @@
 	panel->lp11_init = utils->read_bool(utils->data,
 			"qcom,mdss-dsi-lp11-init");
 
+	panel->reset_gpio_always_on = utils->read_bool(utils->data,
+			"qcom,platform-reset-gpio-always-on");
+
 	if (!utils->read_u32(utils->data,
 				  "qcom,mdss-dsi-init-delay-us",
 				  &val)) {
@@ -4921,6 +4930,7 @@
 	}
 	DSI_DEBUG("[%s] send roi x %d y %d w %d h %d\n", panel->name,
 			roi->x, roi->y, roi->w, roi->h);
+	SDE_EVT32(roi->x, roi->y, roi->w, roi->h);
 
 	mutex_lock(&panel->panel_lock);
 
diff --git a/msm/dsi/dsi_panel.h b/msm/dsi/dsi_panel.h
index 01be1ea..5f901a6 100644
--- a/msm/dsi/dsi_panel.h
+++ b/msm/dsi/dsi_panel.h
@@ -352,6 +352,7 @@
 	bool ulps_feature_enabled;
 	bool ulps_suspend_enabled;
 	bool allow_phy_power_off;
+	bool reset_gpio_always_on;
 	atomic_t esd_recovery_pending;
 
 	bool panel_initialized;
diff --git a/msm/dsi/dsi_phy_hw_v4_0.c b/msm/dsi/dsi_phy_hw_v4_0.c
index d780f9f..220e9de 100644
--- a/msm/dsi/dsi_phy_hw_v4_0.c
+++ b/msm/dsi/dsi_phy_hw_v4_0.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2020, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/math64.h>
@@ -652,6 +652,8 @@
 					struct dsi_phy_cfg *cfg, bool is_master)
 {
 	u32 reg;
+	bool is_cphy = (cfg->phy_type == DSI_PHY_TYPE_CPHY) ?
+			true : false;
 
 	if (is_master) {
 		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL19,
@@ -677,7 +679,7 @@
 			  cfg->timing.lane_v4[12], cfg->timing.lane_v4[13]);
 		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL26,
 			  DSIPHY_CMN_CTRL_0, DSIPHY_CMN_LANE_CTRL0,
-			  0x7f, 0x1f);
+			  0x7f, is_cphy ? 0x17 : 0x1f);
 
 	} else {
 		reg = DSI_R32(phy, DSIPHY_CMN_CLK_CFG1);
@@ -712,7 +714,7 @@
 			  cfg->timing.lane_v4[13], 0x7f);
 		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL9,
 			  DSIPHY_CMN_LANE_CTRL0, DSIPHY_CMN_CTRL_2,
-			  0x1f, 0x40);
+			  is_cphy ? 0x17 : 0x1f, 0x40);
 		/*
 		 * fill with dummy register writes since controller will blindly
 		 * send these values to DSI PHY.
@@ -721,7 +723,7 @@
 		while (reg <= DSI_DYN_REFRESH_PLL_CTRL29) {
 			DSI_DYN_REF_REG_W(phy->dyn_pll_base, reg,
 				  DSIPHY_CMN_LANE_CTRL0, DSIPHY_CMN_CTRL_0,
-				  0x1f, 0x7f);
+				  is_cphy ? 0x17 : 0x1f, 0x7f);
 			reg += 0x4;
 		}
 
diff --git a/msm/dsi/dsi_phy_timing_calc.c b/msm/dsi/dsi_phy_timing_calc.c
index 948e203..655b817 100644
--- a/msm/dsi/dsi_phy_timing_calc.c
+++ b/msm/dsi/dsi_phy_timing_calc.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2020, The Linux Foundation. All rights reserved.
  */
 
 #include "dsi_phy_timing_calc.h"
@@ -37,6 +37,12 @@
 	s64 intermediate;
 	s64 clk_prep_actual;
 
+	t->rec_min = DIV_ROUND_UP((t->mipi_min * clk_params->bitclk_mbps),
+			(8 * clk_params->tlpx_numer_ns));
+	t->rec_max = rounddown(
+		mult_frac((t->mipi_max * clk_params->bitclk_mbps),
+			1, (8 * clk_params->tlpx_numer_ns)), 1);
+
 	dividend = ((t->rec_max - t->rec_min) *
 		clk_params->clk_prep_buf * multiplier);
 	temp  = roundup(div_s64(dividend, 100), multiplier);
@@ -623,6 +629,218 @@
 }
 
 /**
+ * calc_cphy_clk_prepare - calculates cphy_clk_prepare parameter for cphy.
+ */
+static int calc_cphy_clk_prepare(struct dsi_phy_hw *phy,
+			struct phy_clk_params *clk_params,
+			struct phy_timing_desc *desc)
+{
+	u64 multiplier = BIT(20);
+	struct timing_entry *t = &desc->clk_prepare;
+	int rc = 0;
+	u64 dividend, temp;
+
+	t->rec_min = DIV_ROUND_UP((t->mipi_min * clk_params->bitclk_mbps),
+			(7 * clk_params->tlpx_numer_ns));
+	t->rec_max = rounddown(
+		mult_frac((t->mipi_max * clk_params->bitclk_mbps),
+			1, (7 * clk_params->tlpx_numer_ns)), 1);
+
+	dividend = ((t->rec_max - t->rec_min) *
+		clk_params->clk_prep_buf * multiplier);
+	temp  = roundup(div_s64(dividend, 100), multiplier);
+	temp += (t->rec_min * multiplier);
+	t->rec = div_s64(temp, multiplier);
+
+	rc = dsi_phy_cmn_validate_and_set(t, "cphy_clk_prepare");
+
+	DSI_DEBUG("CPHY_CLK_PREPARE: rec_min=%d, rec_max=%d, reg_val=%d\n",
+		t->rec_min, t->rec_max, t->reg_value);
+
+	return rc;
+}
+
+/**
+ * calc_cphy_clk_pre - calculates cphy_clk_pre parameter for cphy.
+ */
+static int calc_cphy_clk_pre(struct dsi_phy_hw *phy,
+			struct phy_clk_params *clk_params,
+			struct phy_timing_desc *desc)
+{
+	u64 multiplier = BIT(20);
+	struct timing_entry *t = &desc->clk_pre;
+	int rc = 0;
+	u64 dividend, temp;
+
+	t->mipi_min = min(300 - 38 - mult_frac(7, clk_params->tlpx_numer_ns,
+			clk_params->bitclk_mbps),
+			mult_frac(448, clk_params->tlpx_numer_ns,
+			clk_params->bitclk_mbps));
+	t->mipi_max = mult_frac(448, clk_params->tlpx_numer_ns,
+			clk_params->bitclk_mbps);
+
+	t->rec_min = DIV_ROUND_UP((t->mipi_min * clk_params->bitclk_mbps),
+			(7 * clk_params->tlpx_numer_ns));
+	t->rec_max = rounddown(
+		mult_frac((t->mipi_max * clk_params->bitclk_mbps),
+			1, (7 * clk_params->tlpx_numer_ns)), 1);
+
+	dividend = ((t->rec_max - t->rec_min) * clk_params->clk_pre_buf
+			* multiplier);
+	temp  = roundup(div_s64(dividend, 100), multiplier);
+	temp += (t->rec_min * multiplier);
+	t->rec = div_s64(temp, multiplier);
+
+	rc = dsi_phy_cmn_validate_and_set(t, "cphy_clk_pre");
+
+	DSI_DEBUG("CPHY_CLK_PRE: rec_min=%d, rec_max=%d, reg_val=%d\n",
+		t->rec_min, t->rec_max, t->reg_value);
+
+	return rc;
+}
+
+/**
+ * calc_cphy_clk_post - calculates cphy_clk_post parameter for cphy.
+ */
+static int calc_cphy_clk_post(struct dsi_phy_hw *phy,
+			struct phy_clk_params *clk_params,
+			struct phy_timing_desc *desc)
+{
+	u64 multiplier = BIT(20);
+	struct timing_entry *t = &desc->clk_post;
+	int rc = 0;
+	u64 dividend, temp;
+
+	t->mipi_min = mult_frac(7, clk_params->tlpx_numer_ns,
+			clk_params->bitclk_mbps);
+	t->mipi_max = mult_frac(224, clk_params->tlpx_numer_ns,
+			clk_params->bitclk_mbps);
+
+	t->rec_min = DIV_ROUND_UP((t->mipi_min * clk_params->bitclk_mbps),
+			(7 * clk_params->tlpx_numer_ns));
+	t->rec_max = rounddown(
+		mult_frac((t->mipi_max * clk_params->bitclk_mbps),
+			  1, (7 * clk_params->tlpx_numer_ns)), 1);
+
+	dividend = ((t->rec_max - t->rec_min) * clk_params->clk_post_buf
+			* multiplier);
+	temp  = roundup(div_s64(dividend, 100), multiplier);
+	temp += (t->rec_min * multiplier);
+	t->rec = div_s64(temp, multiplier);
+
+	rc = dsi_phy_cmn_validate_and_set(t, "cphy_clk_post");
+
+	DSI_DEBUG("CPHY_CLK_POST: rec_min=%d, rec_max=%d, reg_val=%d\n",
+		t->rec_min, t->rec_max, t->reg_value);
+
+	return rc;
+}
+
+/**
+ * calc_cphy_hs_rqst - calculates cphy_hs_rqst parameter for cphy.
+ */
+static int calc_cphy_hs_rqst(struct dsi_phy_hw *phy,
+			struct phy_clk_params *clk_params,
+			struct phy_timing_desc *desc)
+{
+	u64 multiplier = BIT(20);
+	struct timing_entry *t = &desc->hs_rqst;
+	int rc = 0;
+	u64 dividend, temp;
+
+	t->rec_min = DIV_ROUND_UP(
+		((t->mipi_min * clk_params->bitclk_mbps) -
+		 (7 * clk_params->tlpx_numer_ns)),
+		(7 * clk_params->tlpx_numer_ns));
+
+	dividend = ((t->rec_max - t->rec_min) *
+		clk_params->hs_rqst_buf * multiplier);
+	temp  = roundup(div_s64(dividend, 100), multiplier);
+	temp += t->rec_min * multiplier;
+	t->rec = div_s64(temp, multiplier);
+
+	rc = dsi_phy_cmn_validate_and_set(t, "cphy_hs_rqst");
+
+	DSI_DEBUG("CPHY_HS_RQST: rec_min=%d, rec_max=%d, reg_val=%d\n",
+		t->rec_min, t->rec_max, t->reg_value);
+
+	return rc;
+}
+
+/**
+ * calc_cphy_hs_exit - calculates cphy_hs_exit parameter for cphy.
+ */
+static int calc_cphy_hs_exit(struct dsi_phy_hw *phy,
+			struct phy_clk_params *clk_params,
+			struct phy_timing_desc *desc)
+{
+	int rc = 0;
+	u64 multiplier = BIT(20);
+	u64 dividend, temp;
+	struct timing_entry *t = &desc->hs_exit;
+
+	t->rec_min = (DIV_ROUND_UP(
+			(t->mipi_min * clk_params->bitclk_mbps),
+			(7 * clk_params->tlpx_numer_ns)) - 1);
+
+	dividend = ((t->rec_max - t->rec_min) *
+		clk_params->hs_exit_buf * multiplier);
+	temp  = roundup(div_s64(dividend, 100), multiplier);
+	temp += t->rec_min * multiplier;
+	t->rec = div_s64(temp, multiplier);
+
+	rc = dsi_phy_cmn_validate_and_set(t, "cphy_hs_exit");
+
+	DSI_DEBUG("CPHY_HS_EXIT: rec_min=%d, rec_max=%d, reg_val=%d\n",
+		t->rec_min, t->rec_max, t->reg_value);
+
+	return rc;
+}
+
+/**
+ * dsi_phy_cmn_calc_cphy_timing_params - calculates cphy timing parameters
+ *					for a given bit clock
+ */
+static int dsi_phy_cmn_calc_cphy_timing_params(struct dsi_phy_hw *phy,
+	struct phy_clk_params *clk_params, struct phy_timing_desc *desc)
+{
+	int rc = 0;
+
+	rc = calc_cphy_clk_prepare(phy, clk_params, desc);
+	if (rc) {
+		DSI_ERR("clk_prepare calculations failed, rc=%d\n", rc);
+		goto error;
+	}
+
+	rc = calc_cphy_clk_pre(phy, clk_params, desc);
+	if (rc) {
+		DSI_ERR("clk_pre calculations failed, rc=%d\n", rc);
+		goto error;
+	}
+
+	rc = calc_cphy_clk_post(phy, clk_params, desc);
+	if (rc) {
+		DSI_ERR("clk_post calculations failed, rc=%d\n", rc);
+		goto error;
+	}
+
+	rc = calc_cphy_hs_rqst(phy, clk_params, desc);
+	if (rc) {
+		DSI_ERR("hs_rqst calculations failed, rc=%d\n", rc);
+		goto error;
+	}
+
+	rc = calc_cphy_hs_exit(phy, clk_params, desc);
+	if (rc) {
+		DSI_ERR("hs_exit calculations failed, rc=%d\n", rc);
+		goto error;
+	}
+
+error:
+	return rc;
+}
+
+/**
  * calculate_timing_params() - calculates timing parameters.
  * @phy:      Pointer to DSI PHY hardware object.
  * @mode:     Mode information for which timing has to be calculated.
@@ -648,6 +866,7 @@
 	u32 const hs_exit_spec_min = 100;
 	u32 const hs_exit_reco_max = 255;
 	u32 const hs_rqst_spec_min = 50;
+	u32 const hs_rqst_reco_max = 255;
 
 	/* local vars */
 	int rc = 0;
@@ -660,6 +879,8 @@
 	struct phy_clk_params clk_params = {0};
 	struct phy_timing_ops *ops = phy->ops.timing_ops;
 
+	u32 phy_type = host->phy_type;
+
 	memset(&desc, 0x0, sizeof(desc));
 	h_total = DSI_H_TOTAL_DSC(mode);
 	v_total = DSI_V_TOTAL(mode);
@@ -680,8 +901,11 @@
 
 	if (use_mode_bit_clk)
 		x = mode->clk_rate_hz;
-	else
+	else {
 		x = mult_frac(v_total * h_total, inter_num, num_of_lanes);
+		if (phy_type == DSI_PHY_TYPE_CPHY)
+			x = mult_frac(x, 7, 16);
+	}
 	y = rounddown(x, 1);
 
 	clk_params.bitclk_mbps = rounddown(DIV_ROUND_UP_ULL(y, 1000000), 1);
@@ -699,35 +923,31 @@
 	desc.hs_exit.rec_max = hs_exit_reco_max;
 	desc.hs_rqst.mipi_min = hs_rqst_spec_min;
 	desc.hs_rqst_clk.mipi_min = hs_rqst_spec_min;
+	desc.hs_rqst.rec_max = hs_rqst_reco_max;
 
 	if (ops->get_default_phy_params) {
-		ops->get_default_phy_params(&clk_params);
+		ops->get_default_phy_params(&clk_params, phy_type);
 	} else {
 		rc = -EINVAL;
 		goto error;
 	}
 
-	desc.clk_prepare.rec_min = DIV_ROUND_UP(
-			(desc.clk_prepare.mipi_min * clk_params.bitclk_mbps),
-			(8 * clk_params.tlpx_numer_ns)
-			);
-
-	desc.clk_prepare.rec_max = rounddown(
-		mult_frac((desc.clk_prepare.mipi_max * clk_params.bitclk_mbps),
-			  1, (8 * clk_params.tlpx_numer_ns)),
-		1);
-
 	DSI_PHY_DBG(phy, "BIT CLOCK = %d, tlpx_numer_ns=%d, treot_ns=%d\n",
 	       clk_params.bitclk_mbps, clk_params.tlpx_numer_ns,
 	       clk_params.treot_ns);
-	rc = dsi_phy_cmn_calc_timing_params(phy, &clk_params, &desc);
+
+	if (phy_type == DSI_PHY_TYPE_CPHY)
+		rc = dsi_phy_cmn_calc_cphy_timing_params(phy, &clk_params,
+							&desc);
+	else
+		rc = dsi_phy_cmn_calc_timing_params(phy, &clk_params, &desc);
 	if (rc) {
 		DSI_PHY_ERR(phy, "Timing calc failed, rc=%d\n", rc);
 		goto error;
 	}
 
 	if (ops->update_timing_params) {
-		ops->update_timing_params(timing, &desc);
+		ops->update_timing_params(timing, &desc, phy_type);
 	} else {
 		rc = -EINVAL;
 		goto error;
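
Taken together, the new calc_cphy_* helpers above all follow one shape: convert the MIPI min/max constraints from nanoseconds into timing-generator units using a divisor of 7 (where the D-PHY path uses 8), then pick a value between rec_min and rec_max by applying a percentage "buffer" in BIT(20) fixed point. The same hunk also scales the effective bit clock by 7/16 for C-PHY, consistent with C-PHY carrying 16 bits per group of 7 symbols. The following is only an illustrative userspace sketch of that interpolation; the bitclk/tlpx/buffer values are made up and only the arithmetic mirrors the driver.

/*
 * Illustrative model of the fixed-point interpolation used by the
 * calc_cphy_* helpers: rec = rec_min + buf% of (rec_max - rec_min),
 * computed with a BIT(20) multiplier as in the driver.
 */
#include <stdint.h>
#include <stdio.h>

#define MULTIPLIER (1ULL << 20)

static uint64_t div_round_up(uint64_t n, uint64_t d) { return (n + d - 1) / d; }
static uint64_t round_up_to(uint64_t n, uint64_t m)  { return div_round_up(n, m) * m; }

int main(void)
{
	uint64_t bitclk_mbps = 500;   /* assumed C-PHY symbol clock, MHz */
	uint64_t tlpx_numer_ns = 60;  /* assumed TLPX numerator, ns */
	uint64_t mipi_min_ns = 38, mipi_max_ns = 95;
	uint64_t buf_percent = 50;    /* plays the role of clk_prep_buf */

	/* ns -> timing units: divide by 7 UI for C-PHY (D-PHY uses 8) */
	uint64_t rec_min = div_round_up(mipi_min_ns * bitclk_mbps,
					7 * tlpx_numer_ns);
	uint64_t rec_max = (mipi_max_ns * bitclk_mbps) / (7 * tlpx_numer_ns);

	/* rec_min + buf% of the window, rounded up in BIT(20) fixed point */
	uint64_t dividend = (rec_max - rec_min) * buf_percent * MULTIPLIER;
	uint64_t temp = round_up_to(dividend / 100, MULTIPLIER);
	uint64_t rec = (temp + rec_min * MULTIPLIER) / MULTIPLIER;

	printf("rec_min=%llu rec_max=%llu rec=%llu\n",
	       (unsigned long long)rec_min, (unsigned long long)rec_max,
	       (unsigned long long)rec);
	return 0;
}
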
diff --git a/msm/dsi/dsi_phy_timing_calc.h b/msm/dsi/dsi_phy_timing_calc.h
index 2ed5e72..536e767 100644
--- a/msm/dsi/dsi_phy_timing_calc.h
+++ b/msm/dsi/dsi_phy_timing_calc.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2020, The Linux Foundation. All rights reserved.
  */
 
 #ifndef _DSI_PHY_TIMING_CALC_H_
@@ -81,7 +81,8 @@
  * Various Ops needed for auto-calculation of DSI PHY timing parameters.
  */
 struct phy_timing_ops {
-	void (*get_default_phy_params)(struct phy_clk_params *params);
+	void (*get_default_phy_params)(struct phy_clk_params *params,
+				       u32 phy_type);
 
 	int32_t (*calc_clk_zero)(s64 rec_temp1, s64 mult);
 
@@ -96,14 +97,15 @@
 			struct phy_timing_desc *desc);
 
 	void (*update_timing_params)(struct dsi_phy_per_lane_cfgs *timing,
-		struct phy_timing_desc *desc);
+		struct phy_timing_desc *desc, u32 phy_type);
 };
 
 #define roundup64(x, y) \
 	({ u64 _tmp = (x)+(y)-1; do_div(_tmp, y); _tmp * y; })
 
 /* DSI PHY timing functions for 14nm */
-void dsi_phy_hw_v2_0_get_default_phy_params(struct phy_clk_params *params);
+void dsi_phy_hw_v2_0_get_default_phy_params(struct phy_clk_params *params,
+					    u32 phy_type);
 
 int32_t dsi_phy_hw_v2_0_calc_clk_zero(s64 rec_temp1, s64 mult);
 
@@ -118,10 +120,11 @@
 		struct phy_timing_desc *desc);
 
 void dsi_phy_hw_v2_0_update_timing_params(struct dsi_phy_per_lane_cfgs *timing,
-		struct phy_timing_desc *desc);
+		struct phy_timing_desc *desc, u32 phy_type);
 
 /* DSI PHY timing functions for 10nm */
-void dsi_phy_hw_v3_0_get_default_phy_params(struct phy_clk_params *params);
+void dsi_phy_hw_v3_0_get_default_phy_params(struct phy_clk_params *params,
+					    u32 phy_type);
 
 int32_t dsi_phy_hw_v3_0_calc_clk_zero(s64 rec_temp1, s64 mult);
 
@@ -136,10 +139,11 @@
 		struct phy_timing_desc *desc);
 
 void dsi_phy_hw_v3_0_update_timing_params(struct dsi_phy_per_lane_cfgs *timing,
-		struct phy_timing_desc *desc);
+		struct phy_timing_desc *desc, u32 phy_type);
 
 /* DSI PHY timing functions for 7nm */
-void dsi_phy_hw_v4_0_get_default_phy_params(struct phy_clk_params *params);
+void dsi_phy_hw_v4_0_get_default_phy_params(struct phy_clk_params *params,
+					    u32 phy_type);
 
 int32_t dsi_phy_hw_v4_0_calc_clk_zero(s64 rec_temp1, s64 mult);
 
@@ -154,6 +158,6 @@
 		struct phy_timing_desc *desc);
 
 void dsi_phy_hw_v4_0_update_timing_params(struct dsi_phy_per_lane_cfgs *timing,
-		struct phy_timing_desc *desc);
+		struct phy_timing_desc *desc, u32 phy_type);
 
 #endif /* _DSI_PHY_TIMING_CALC_H_ */
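
The roundup64() macro kept in this header avoids a plain 64-bit division, which the kernel does not provide on 32-bit builds; assuming the usual do_div() semantics (divide the first argument in place, return the remainder), the macro computes ceil(x/y) * y. A userspace stand-in, purely for illustration:

/* Userspace stand-in for roundup64(): round x up to a multiple of y. */
#include <stdint.h>
#include <stdio.h>

static uint64_t roundup64(uint64_t x, uint64_t y)
{
	uint64_t tmp = x + y - 1;

	tmp -= tmp % y;		/* same result as do_div() then multiply back */
	return tmp;
}

int main(void)
{
	/* example: 19000 rounded up to a multiple of 420 -> 19320 */
	printf("%llu\n", (unsigned long long)roundup64(19000, 420));
	return 0;
}
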
diff --git a/msm/dsi/dsi_phy_timing_v2_0.c b/msm/dsi/dsi_phy_timing_v2_0.c
index d3c1cb1..3839993 100644
--- a/msm/dsi/dsi_phy_timing_v2_0.c
+++ b/msm/dsi/dsi_phy_timing_v2_0.c
@@ -1,11 +1,12 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2020, The Linux Foundation. All rights reserved.
  */
 
 #include "dsi_phy_timing_calc.h"
 
-void dsi_phy_hw_v2_0_get_default_phy_params(struct phy_clk_params *params)
+void dsi_phy_hw_v2_0_get_default_phy_params(struct phy_clk_params *params,
+					    u32 phy_type)
 {
 	params->clk_prep_buf = 50;
 	params->clk_zero_buf = 2;
@@ -77,7 +78,7 @@
 
 void dsi_phy_hw_v2_0_update_timing_params(
 	struct dsi_phy_per_lane_cfgs *timing,
-	struct phy_timing_desc *desc)
+	struct phy_timing_desc *desc, u32 phy_type)
 {
 	int i = 0;
 
diff --git a/msm/dsi/dsi_phy_timing_v3_0.c b/msm/dsi/dsi_phy_timing_v3_0.c
index 562d296..c57b6b5 100644
--- a/msm/dsi/dsi_phy_timing_v3_0.c
+++ b/msm/dsi/dsi_phy_timing_v3_0.c
@@ -1,12 +1,12 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2020, The Linux Foundation. All rights reserved.
  */
 
 #include "dsi_phy_timing_calc.h"
 
 void dsi_phy_hw_v3_0_get_default_phy_params(
-		struct phy_clk_params *params)
+		struct phy_clk_params *params, u32 phy_type)
 {
 	params->clk_prep_buf = 0;
 	params->clk_zero_buf = 0;
@@ -72,7 +72,7 @@
 
 void dsi_phy_hw_v3_0_update_timing_params(
 	struct dsi_phy_per_lane_cfgs *timing,
-	struct phy_timing_desc *desc)
+	struct phy_timing_desc *desc, u32 phy_type)
 {
 	timing->lane_v3[0] = 0x00;
 	timing->lane_v3[1] = desc->clk_zero.reg_value;
diff --git a/msm/dsi/dsi_phy_timing_v4_0.c b/msm/dsi/dsi_phy_timing_v4_0.c
index eb6a8f7..1127628 100644
--- a/msm/dsi/dsi_phy_timing_v4_0.c
+++ b/msm/dsi/dsi_phy_timing_v4_0.c
@@ -1,24 +1,32 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2020, The Linux Foundation. All rights reserved.
  */
 
 #include "dsi_phy_timing_calc.h"
 
 void dsi_phy_hw_v4_0_get_default_phy_params(
-		struct phy_clk_params *params)
+		struct phy_clk_params *params, u32 phy_type)
 {
-	params->clk_prep_buf = 50;
-	params->clk_zero_buf = 2;
-	params->clk_trail_buf = 30;
-	params->hs_prep_buf = 50;
-	params->hs_zero_buf = 10;
-	params->hs_trail_buf = 30;
-	params->hs_rqst_buf = 0;
-	params->hs_exit_buf = 10;
-	/* 1.25 is used in code for precision */
-	params->clk_pre_buf = 1;
-	params->clk_post_buf = 5;
+	if (phy_type == DSI_PHY_TYPE_CPHY) {
+		params->clk_prep_buf = 50;
+		params->clk_pre_buf = 20;
+		params->clk_post_buf = 80;
+		params->hs_rqst_buf = 1;
+		params->hs_exit_buf = 10;
+	} else {
+		params->clk_prep_buf = 50;
+		params->clk_zero_buf = 2;
+		params->clk_trail_buf = 30;
+		params->hs_prep_buf = 50;
+		params->hs_zero_buf = 10;
+		params->hs_trail_buf = 30;
+		params->hs_rqst_buf = 0;
+		params->hs_exit_buf = 10;
+		/* 1.25 is used in code for precision */
+		params->clk_pre_buf = 1;
+		params->clk_post_buf = 5;
+	}
 }
 
 int32_t dsi_phy_hw_v4_0_calc_clk_zero(s64 rec_temp1, s64 mult)
@@ -75,22 +83,37 @@
 
 void dsi_phy_hw_v4_0_update_timing_params(
 	struct dsi_phy_per_lane_cfgs *timing,
-	struct phy_timing_desc *desc)
+	struct phy_timing_desc *desc, u32 phy_type)
 {
-	timing->lane_v4[0] = 0x00;
-	timing->lane_v4[1] = desc->clk_zero.reg_value;
-	timing->lane_v4[2] = desc->clk_prepare.reg_value;
-	timing->lane_v4[3] = desc->clk_trail.reg_value;
-	timing->lane_v4[4] = desc->hs_exit.reg_value;
-	timing->lane_v4[5] = desc->hs_zero.reg_value;
-	timing->lane_v4[6] = desc->hs_prepare.reg_value;
-	timing->lane_v4[7] = desc->hs_trail.reg_value;
-	timing->lane_v4[8] = desc->hs_rqst.reg_value;
-	timing->lane_v4[9] = 0x02;
-	timing->lane_v4[10] = 0x04;
-	timing->lane_v4[11] = 0x00;
-	timing->lane_v4[12] = desc->clk_pre.reg_value;
-	timing->lane_v4[13] = desc->clk_post.reg_value;
+	if (phy_type == DSI_PHY_TYPE_CPHY) {
+		timing->lane_v4[0] = 0x00;
+		timing->lane_v4[1] = 0x00;
+		timing->lane_v4[2] = 0x00;
+		timing->lane_v4[3] = 0x00;
+		timing->lane_v4[4] = desc->hs_exit.reg_value;
+		timing->lane_v4[5] = desc->clk_pre.reg_value;
+		timing->lane_v4[6] = desc->clk_prepare.reg_value;
+		timing->lane_v4[7] = desc->clk_post.reg_value;
+		timing->lane_v4[8] = desc->hs_rqst.reg_value;
+		timing->lane_v4[9] = 0x02;
+		timing->lane_v4[10] = 0x04;
+		timing->lane_v4[11] = 0x00;
+	} else {
+		timing->lane_v4[0] = 0x00;
+		timing->lane_v4[1] = desc->clk_zero.reg_value;
+		timing->lane_v4[2] = desc->clk_prepare.reg_value;
+		timing->lane_v4[3] = desc->clk_trail.reg_value;
+		timing->lane_v4[4] = desc->hs_exit.reg_value;
+		timing->lane_v4[5] = desc->hs_zero.reg_value;
+		timing->lane_v4[6] = desc->hs_prepare.reg_value;
+		timing->lane_v4[7] = desc->hs_trail.reg_value;
+		timing->lane_v4[8] = desc->hs_rqst.reg_value;
+		timing->lane_v4[9] = 0x02;
+		timing->lane_v4[10] = 0x04;
+		timing->lane_v4[11] = 0x00;
+		timing->lane_v4[12] = desc->clk_pre.reg_value;
+		timing->lane_v4[13] = desc->clk_post.reg_value;
+	}
 
 	DSI_DEBUG("[%d %d %d %d]\n", timing->lane_v4[0],
 		timing->lane_v4[1], timing->lane_v4[2], timing->lane_v4[3]);
diff --git a/msm/msm_atomic.c b/msm/msm_atomic.c
index 9eafad3..cd5bca1 100644
--- a/msm/msm_atomic.c
+++ b/msm/msm_atomic.c
@@ -76,13 +76,17 @@
 					    v);
 }
 
-static inline bool _msm_seamless_for_crtc(struct drm_atomic_state *state,
+static inline bool _msm_seamless_for_crtc(struct drm_device *dev,
+					struct drm_atomic_state *state,
 			struct drm_crtc_state *crtc_state, bool enable)
 {
 	struct drm_connector *connector = NULL;
 	struct drm_connector_state  *conn_state = NULL;
+	struct msm_drm_private *priv = dev->dev_private;
+	struct msm_kms *kms = priv->kms;
 	int i = 0;
 	int conn_cnt = 0;
+	bool splash_en = false;
 
 	if (msm_is_mode_seamless(&crtc_state->mode) ||
 		msm_is_mode_seamless_vrr(&crtc_state->adjusted_mode) ||
@@ -101,7 +105,11 @@
 					 crtc_state->crtc))
 				conn_cnt++;
 
-			if (MULTIPLE_CONN_DETECTED(conn_cnt))
+			if (kms && kms->funcs && kms->funcs->check_for_splash)
+				splash_en = kms->funcs->check_for_splash(kms,
+							 crtc_state->crtc);
+
+			if (MULTIPLE_CONN_DETECTED(conn_cnt) && !splash_en)
 				return true;
 		}
 	}
@@ -257,7 +265,7 @@
 		if (!old_crtc_state->active)
 			continue;
 
-		if (_msm_seamless_for_crtc(old_state, crtc->state, false))
+		if (_msm_seamless_for_crtc(dev, old_state, crtc->state, false))
 			continue;
 
 		funcs = crtc->helper_private;
@@ -405,7 +413,7 @@
 		if (!new_crtc_state->active)
 			continue;
 
-		if (_msm_seamless_for_crtc(old_state, crtc->state, true))
+		if (_msm_seamless_for_crtc(dev, old_state, crtc->state, true))
 			continue;
 
 		funcs = crtc->helper_private;
@@ -609,7 +617,7 @@
 	struct msm_drm_private *priv = dev->dev_private;
 	struct drm_crtc *crtc = NULL;
 	struct drm_crtc_state *crtc_state = NULL;
-	int ret = -EINVAL, i = 0, j = 0;
+	int ret = -ECANCELED, i = 0, j = 0;
 	bool nonblock;
 
 	/* cache since work will kfree commit in non-blocking case */
@@ -630,6 +638,7 @@
 				} else {
 					DRM_ERROR(" Error for crtc_id: %d\n",
 						priv->disp_thread[j].crtc_id);
+					ret = -EINVAL;
 				}
 				break;
 			}
@@ -645,13 +654,17 @@
 	}
 
 	if (ret) {
+		if (ret == -EINVAL)
+			DRM_ERROR("failed to dispatch commit to any CRTC\n");
+		else
+			DRM_DEBUG_DRIVER_RATELIMITED("empty crtc state\n");
+
 		/**
 		 * this is not expected to happen, but at this point the state
 		 * has been swapped, but we couldn't dispatch to a crtc thread.
 		 * fallback now to a synchronous complete_commit to try and
 		 * ensure that SW and HW state don't get out of sync.
 		 */
-		DRM_ERROR("failed to dispatch commit to any CRTC\n");
 		complete_commit(commit);
 	} else if (!nonblock) {
 		kthread_flush_work(&commit->commit_work);
@@ -727,6 +740,16 @@
 		c->plane_mask |= (1 << drm_plane_index(plane));
 	}
 
+	/* Take connection_mutex to protect the prepare_fence callback */
+retry:
+	ret = drm_modeset_lock(&state->dev->mode_config.connection_mutex,
+		state->acquire_ctx);
+
+	if (ret == -EDEADLK) {
+		drm_modeset_backoff(state->acquire_ctx);
+		goto retry;
+	}
+
 	/*
 	 * Wait for pending updates on any of the same crtc's and then
 	 * mark our set of crtc's as busy:
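
The retry loop added above (the same loop is removed from sde_kms_prepare_fence further down) follows the standard DRM convention for ww-mutex acquisition: if drm_modeset_lock() returns -EDEADLK, drm_modeset_backoff() releases the locks held in the acquire context and the caller retries, so two commits taking locks in different orders cannot deadlock. The snippet below is only a rough userspace analogue of that retry shape using a trylock and a yield; it does not model the "release everything held" part of the real backoff.

/* Rough analogue of the -EDEADLK retry loop: back off and retry instead of
 * blocking when the lock cannot be taken in the current order. */
#include <pthread.h>
#include <sched.h>
#include <stdio.h>

static pthread_mutex_t connection_mutex = PTHREAD_MUTEX_INITIALIZER;

static void lock_with_backoff(void)
{
	for (;;) {
		if (pthread_mutex_trylock(&connection_mutex) == 0)
			return;		/* acquired */
		sched_yield();		/* "backoff", then try again */
	}
}

int main(void)
{
	lock_with_backoff();
	puts("connection_mutex held");
	pthread_mutex_unlock(&connection_mutex);
	return 0;
}
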
diff --git a/msm/msm_drv.c b/msm/msm_drv.c
index 9b65de5..1e959be 100644
--- a/msm/msm_drv.c
+++ b/msm/msm_drv.c
@@ -1177,7 +1177,7 @@
 	 * if kms module is not yet initialized.
 	 */
 	if (!kms || (kms && kms->funcs && kms->funcs->check_for_splash
-		&& kms->funcs->check_for_splash(kms)))
+		&& kms->funcs->check_for_splash(kms, NULL)))
 		return;
 
 	/*
@@ -1496,24 +1496,27 @@
 	 * calls add to client list and return.
 	 */
 	count = msm_event_client_count(dev, req_event, false);
-	/* Add current client to list */
-	spin_lock_irqsave(&dev->event_lock, flag);
-	list_add_tail(&client->base.link, &priv->client_event_list);
-	spin_unlock_irqrestore(&dev->event_lock, flag);
-
-	if (count)
+	if (count) {
+		/* Add current client to list */
+		spin_lock_irqsave(&dev->event_lock, flag);
+		list_add_tail(&client->base.link, &priv->client_event_list);
+		spin_unlock_irqrestore(&dev->event_lock, flag);
 		return 0;
+	}
 
 	ret = msm_register_event(dev, req_event, file, true);
 	if (ret) {
 		DRM_ERROR("failed to enable event %x object %x object id %d\n",
 			req_event->event, req_event->object_type,
 			req_event->object_id);
-		spin_lock_irqsave(&dev->event_lock, flag);
-		list_del(&client->base.link);
-		spin_unlock_irqrestore(&dev->event_lock, flag);
 		kfree(client);
+	} else {
+		/* Add current client to list */
+		spin_lock_irqsave(&dev->event_lock, flag);
+		list_add_tail(&client->base.link, &priv->client_event_list);
+		spin_unlock_irqrestore(&dev->event_lock, flag);
 	}
+
 	return ret;
 }
 
diff --git a/msm/msm_kms.h b/msm/msm_kms.h
index 8472f84..de34cb6 100644
--- a/msm/msm_kms.h
+++ b/msm/msm_kms.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2020, The Linux Foundation. All rights reserved.
  * Copyright (C) 2013 Red Hat
  * Author: Rob Clark <robdclark@gmail.com>
  *
@@ -123,7 +123,7 @@
 	/* handle continuous splash  */
 	int (*cont_splash_config)(struct msm_kms *kms);
 	/* check for continuous splash status */
-	bool (*check_for_splash)(struct msm_kms *kms);
+	bool (*check_for_splash)(struct msm_kms *kms, struct drm_crtc *crtc);
 	/* topology information */
 	int (*get_mixer_count)(const struct msm_kms *kms,
 			const struct drm_display_mode *mode,
diff --git a/msm/msm_notifier.c b/msm/msm_notifier.c
index 9435268..ed528b0 100644
--- a/msm/msm_notifier.c
+++ b/msm/msm_notifier.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/kernel.h>
@@ -52,10 +52,11 @@
 	/*
 	 * Get ceiling of fps from notifier data to pass to scheduler.
 	 * Default will be FPS60 and sent to scheduler during suspend.
-	 * Currently scheduler expects FPS120 for any fps over 90.
 	 */
 	fps = notifier_data->refresh_rate;
-	if (fps > FPS90)
+	if (fps > FPS120)
+		sched_fps = FPS144;
+	else if (fps > FPS90)
 		sched_fps = FPS120;
 	else if (fps > FPS60)
 		sched_fps = FPS90;
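
The notifier change above adds an FPS144 bucket: the panel refresh rate is mapped to the next scheduler ceiling (above 120 gives FPS144, above 90 gives FPS120, above 60 gives FPS90, otherwise the FPS60 default mentioned in the comment). A standalone sketch of that mapping; the FPS* values here are stand-ins, since the real constants come from the scheduler headers.

/* Illustrative mapping of panel refresh rate to the scheduler fps ceiling. */
#include <stdio.h>

enum { FPS60 = 60, FPS90 = 90, FPS120 = 120, FPS144 = 144 };

static int sched_fps_for(int fps)
{
	if (fps > FPS120)
		return FPS144;
	if (fps > FPS90)
		return FPS120;
	if (fps > FPS60)
		return FPS90;
	return FPS60;
}

int main(void)
{
	int rates[] = { 60, 90, 120, 144 };

	for (unsigned int i = 0; i < sizeof(rates) / sizeof(rates[0]); i++)
		printf("panel %d Hz -> sched %d\n", rates[i],
		       sched_fps_for(rates[i]));
	return 0;
}
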
diff --git a/msm/sde/sde_connector.c b/msm/sde/sde_connector.c
index 29a8433..e7fb277 100644
--- a/msm/sde/sde_connector.c
+++ b/msm/sde/sde_connector.c
@@ -1124,12 +1124,6 @@
 
 	connector = &c_conn->base;
 
-	if (!connector->hdr_supported) {
-		SDE_ERROR_CONN(c_conn, "sink doesn't support HDR\n");
-		rc = -ENOTSUPP;
-		goto end;
-	}
-
 	memset(&c_state->hdr_meta, 0, sizeof(c_state->hdr_meta));
 
 	if (!usr_ptr) {
@@ -1137,6 +1131,12 @@
 		goto end;
 	}
 
+	if (!connector->hdr_supported) {
+		SDE_ERROR_CONN(c_conn, "sink doesn't support HDR\n");
+		rc = -ENOTSUPP;
+		goto end;
+	}
+
 	if (copy_from_user(&c_state->hdr_meta,
 		(void __user *)usr_ptr,
 			sizeof(*hdr_meta))) {
diff --git a/msm/sde/sde_core_perf.c b/msm/sde/sde_core_perf.c
index 787244e..3add93d 100644
--- a/msm/sde/sde_core_perf.c
+++ b/msm/sde/sde_core_perf.c
@@ -105,12 +105,15 @@
 	if (!old_perf)
 		return;
 
-	if (!perf->bw_ctl[SDE_POWER_HANDLE_DBUS_ID_LLCC] &&
-		!perf->max_per_pipe_ib[SDE_POWER_HANDLE_DBUS_ID_LLCC] &&
-		!perf->bw_ctl[SDE_POWER_HANDLE_DBUS_ID_EBI] &&
-		!perf->max_per_pipe_ib[SDE_POWER_HANDLE_DBUS_ID_EBI] &&
+	if (!perf->bw_ctl[SDE_POWER_HANDLE_DBUS_ID_MNOC] &&
+		!perf->max_per_pipe_ib[SDE_POWER_HANDLE_DBUS_ID_MNOC] &&
 		state->plane_mask) {
 
+		perf->bw_ctl[SDE_POWER_HANDLE_DBUS_ID_MNOC] =
+			old_perf->bw_ctl[SDE_POWER_HANDLE_DBUS_ID_MNOC];
+		perf->max_per_pipe_ib[SDE_POWER_HANDLE_DBUS_ID_MNOC] =
+			old_perf->max_per_pipe_ib
+					[SDE_POWER_HANDLE_DBUS_ID_MNOC];
 		perf->bw_ctl[SDE_POWER_HANDLE_DBUS_ID_LLCC] =
 			old_perf->bw_ctl[SDE_POWER_HANDLE_DBUS_ID_LLCC];
 		perf->max_per_pipe_ib[SDE_POWER_HANDLE_DBUS_ID_LLCC] =
@@ -120,6 +123,9 @@
 		perf->max_per_pipe_ib[SDE_POWER_HANDLE_DBUS_ID_EBI] =
 		  old_perf->max_per_pipe_ib[SDE_POWER_HANDLE_DBUS_ID_EBI];
 
+		if (!perf->core_clk_rate)
+			perf->core_clk_rate = old_perf->core_clk_rate;
+
 		for (i = 0; i < new_cstate->num_connectors; i++) {
 			conn = new_cstate->connectors[i];
 			if (!conn)
@@ -130,13 +136,14 @@
 				is_doze_suspend = true;
 		}
 
-		if (!is_doze_suspend && conn && c_conn) {
-			SDE_DEBUG("No BW, planes:%x dpms_mode:%d lpmode:%d\n",
+		if (!is_doze_suspend && conn && c_conn)
+			SDE_ERROR("No BW, planes:%x dpms_mode:%d lpmode:%d\n",
 				state->plane_mask, c_conn->dpms_mode,
 				sde_connector_get_lp(conn));
+		if (conn && c_conn)
 			SDE_EVT32(state->plane_mask, c_conn->dpms_mode,
-				sde_connector_get_lp(conn), SDE_EVTLOG_ERROR);
-		}
+				sde_connector_get_lp(conn), is_doze_suspend,
+				SDE_EVTLOG_ERROR);
 	}
 }
 
diff --git a/msm/sde/sde_crtc.c b/msm/sde/sde_crtc.c
index c5b6224..f16fe8c 100644
--- a/msm/sde/sde_crtc.c
+++ b/msm/sde/sde_crtc.c
@@ -4247,6 +4247,9 @@
 {
 	struct drm_plane *plane;
 	int i;
+	struct drm_crtc_state *old_state = crtc->state;
+	struct sde_crtc_state *old_cstate = to_sde_crtc_state(old_state);
+
 	if (secure == SDE_DRM_SEC_ONLY) {
 		/*
 		 * validate planes - only fb_sec_dir is allowed during sec_crtc
@@ -4307,6 +4310,8 @@
 		 * - fail empty commit
 		 * - validate dim_layer or plane is staged in the supported
 		 *   blendstage
+		 * - fail if previous commit has no planes staged and
+		 *   no dim layer at highest blendstage.
 		 */
 		if (sde_kms->catalog->sui_supported_blendstage) {
 			int sec_stage = cnt ? pstates[0].sde_pstate->stage :
@@ -4324,6 +4329,14 @@
 					cstate->num_dim_layers, sec_stage);
 				return -EINVAL;
 			}
+
+			if (!old_state->plane_mask &&
+				!old_cstate->num_dim_layers) {
+				SDE_ERROR(
+				"crtc%d: no dim layer in nonsecure to secure transition\n",
+					DRMID(crtc));
+				return -EINVAL;
+			}
 		}
 	}
 
@@ -5892,6 +5905,7 @@
 			pstate->stage);
 
 		fence = pstate->input_fence;
+		SDE_EVT32(DRMID(crtc), fence);
 		if (fence)
 			sde_fence_list_dump(fence, &s);
 	}
diff --git a/msm/sde/sde_encoder.c b/msm/sde/sde_encoder.c
index 5d97711..13de591 100644
--- a/msm/sde/sde_encoder.c
+++ b/msm/sde/sde_encoder.c
@@ -3447,7 +3447,8 @@
 
 	_sde_encoder_input_handler_register(drm_enc);
 
-	if ((drm_enc->crtc->state->connectors_changed &&
+	if ((drm_enc->crtc && drm_enc->crtc->state &&
+			drm_enc->crtc->state->connectors_changed &&
 			sde_encoder_in_clone_mode(drm_enc)) ||
 			!(msm_is_mode_seamless_vrr(cur_mode)
 			|| msm_is_mode_seamless_dms(cur_mode)
@@ -3811,6 +3812,11 @@
 	SDE_DEBUG_ENC(sde_enc, "\n");
 	SDE_EVT32(DRMID(drm_enc), enable);
 
+	if (sde_encoder_in_clone_mode(drm_enc)) {
+		SDE_EVT32(DRMID(drm_enc), SDE_EVTLOG_ERROR);
+		return;
+	}
+
 	spin_lock_irqsave(&sde_enc->enc_spinlock, lock_flags);
 	sde_enc->crtc_vblank_cb = vbl_cb;
 	sde_enc->crtc_vblank_cb_data = vbl_data;
@@ -3882,20 +3888,15 @@
 			if (sde_enc->phys_encs[i] == ready_phys) {
 				SDE_EVT32_VERBOSE(DRMID(drm_enc), i,
 				     atomic_read(&sde_enc->frame_done_cnt[i]));
-				if (!atomic_add_unless(
-					&sde_enc->frame_done_cnt[i], 1, 1)) {
+				if (atomic_inc_return(
+					&sde_enc->frame_done_cnt[i]) > 1)
 					SDE_EVT32(DRMID(drm_enc), event,
 						ready_phys->intf_idx,
 						SDE_EVTLOG_ERROR);
-					SDE_ERROR_ENC(sde_enc,
-						"intf idx:%d, event:%d\n",
-						ready_phys->intf_idx, event);
-					return;
-				}
 			}
 
 			if (topology != SDE_RM_TOPOLOGY_PPSPLIT &&
-			    atomic_read(&sde_enc->frame_done_cnt[i]) != 1)
+			    !atomic_read(&sde_enc->frame_done_cnt[i]))
 				trigger = false;
 		}
 
@@ -3908,7 +3909,7 @@
 					&sde_enc->crtc_frame_event_cb_data,
 					event);
 			for (i = 0; i < sde_enc->num_phys_encs; i++)
-				atomic_set(&sde_enc->frame_done_cnt[i], 0);
+				atomic_dec(&sde_enc->frame_done_cnt[i]);
 		}
 	} else if (sde_enc->crtc_frame_event_cb) {
 		if (!is_cmd_mode)
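
The sde_encoder change above replaces the saturating counter (atomic_add_unless to 1, then reset to 0) with a plain increment and decrement: each physical encoder bumps its frame_done_cnt, the CRTC frame-event callback fires only once every counter is non-zero, and each counter is then decremented rather than reset, so an extra frame-done that races in is recorded instead of dropped. A userspace sketch of that counting scheme with C11 atomics (the driver uses the kernel atomic_t API):

/* Model of per-phys frame-done counting: trigger only when every physical
 * encoder has reported at least one done, then consume one done from each. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define NUM_PHYS 2

static atomic_int frame_done_cnt[NUM_PHYS];

static bool handle_frame_done(int ready_idx)
{
	bool trigger = true;

	atomic_fetch_add(&frame_done_cnt[ready_idx], 1);

	for (int i = 0; i < NUM_PHYS; i++)
		if (atomic_load(&frame_done_cnt[i]) == 0)
			trigger = false;

	if (trigger)
		for (int i = 0; i < NUM_PHYS; i++)
			atomic_fetch_sub(&frame_done_cnt[i], 1);

	return trigger;
}

int main(void)
{
	printf("phys0 done -> trigger=%d\n", handle_frame_done(0));
	printf("phys1 done -> trigger=%d\n", handle_frame_done(1));
	return 0;
}
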
diff --git a/msm/sde/sde_encoder_phys_vid.c b/msm/sde/sde_encoder_phys_vid.c
index ce778f0..54faa7c 100644
--- a/msm/sde/sde_encoder_phys_vid.c
+++ b/msm/sde/sde_encoder_phys_vid.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2020, The Linux Foundation. All rights reserved.
  */
 
 #define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
@@ -270,9 +270,9 @@
 	m = phys_enc->sde_kms->catalog;
 
 	vfp_fetch_lines = programmable_fetch_get_num_lines(vid_enc,
-							   timing, true);
+							   timing, false);
 	if (vfp_fetch_lines) {
-		vert_total = get_vertical_total(timing, true);
+		vert_total = get_vertical_total(timing, false);
 		horiz_total = get_horizontal_total(timing);
 		vfp_fetch_start_vsync_counter =
 			(vert_total - vfp_fetch_lines) * horiz_total + 1;
diff --git a/msm/sde/sde_encoder_phys_wb.c b/msm/sde/sde_encoder_phys_wb.c
index eace7d5..1530f0c 100644
--- a/msm/sde/sde_encoder_phys_wb.c
+++ b/msm/sde/sde_encoder_phys_wb.c
@@ -548,6 +548,13 @@
 		intf_cfg_v1->wb_count = num_wb;
 		intf_cfg_v1->wb[0] = hw_wb->idx;
 		if (SDE_FORMAT_IS_YUV(format)) {
+			if (!phys_enc->hw_cdm) {
+				SDE_ERROR("Format:YUV but no cdm allocated\n");
+				SDE_EVT32(DRMID(phys_enc->parent),
+							 SDE_EVTLOG_ERROR);
+				return;
+			}
+
 			intf_cfg_v1->cdm_count = num_wb;
 			intf_cfg_v1->cdm[0] = hw_cdm->idx;
 		}
@@ -582,18 +589,16 @@
 
 }
 
-static void _sde_enc_phys_wb_detect_cwb(struct sde_encoder_phys *phys_enc,
+static bool _sde_enc_phys_wb_detect_cwb(struct sde_encoder_phys *phys_enc,
 		struct drm_crtc_state *crtc_state)
 {
 	struct drm_encoder *encoder;
 	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
 	const struct sde_wb_cfg *wb_cfg = wb_enc->hw_wb->caps;
 
-	phys_enc->in_clone_mode = false;
-
 	/* Check if WB has CWB support */
 	if (!(wb_cfg->features & BIT(SDE_WB_HAS_CWB)))
-		return;
+		return false;
 
 	/* if any other encoder is connected to same crtc enable clone mode*/
 	drm_for_each_encoder(encoder, crtc_state->crtc->dev) {
@@ -601,12 +606,11 @@
 			continue;
 
 		if (phys_enc->parent != encoder) {
-			phys_enc->in_clone_mode = true;
-			break;
+			return true;
 		}
 	}
 
-	SDE_DEBUG("detect CWB - status:%d\n", phys_enc->in_clone_mode);
+	return false;
 }
 
 static int _sde_enc_phys_wb_validate_cwb(struct sde_encoder_phys *phys_enc,
@@ -686,6 +690,7 @@
 	struct sde_rect wb_roi;
 	const struct drm_display_mode *mode = &crtc_state->mode;
 	int rc;
+	bool clone_mode_curr = false;
 
 	SDE_DEBUG("[atomic_check:%d,%d,\"%s\",%d,%d]\n",
 			hw_wb->idx - WB_0, mode->base.id, mode->name,
@@ -701,8 +706,20 @@
 		return -EINVAL;
 	}
 
-	_sde_enc_phys_wb_detect_cwb(phys_enc, crtc_state);
+	clone_mode_curr = _sde_enc_phys_wb_detect_cwb(phys_enc, crtc_state);
 
+	/**
+	 * Fail the WB commit when there is a CWB session enabled in HW.
+	 * CWB session needs to be disabled since WB and CWB share the same
+	 * writeback hardware block.
+	 */
+	if (phys_enc->in_clone_mode && !clone_mode_curr) {
+		SDE_ERROR("WB commit before CWB disable\n");
+		return -EINVAL;
+	}
+
+	SDE_DEBUG("detect CWB - status:%d\n", clone_mode_curr);
+	phys_enc->in_clone_mode = clone_mode_curr;
 	memset(&wb_roi, 0, sizeof(struct sde_rect));
 
 	rc = sde_wb_connector_state_get_output_roi(conn_state, &wb_roi);
@@ -714,11 +731,10 @@
 	SDE_DEBUG("[roi:%u,%u,%u,%u]\n", wb_roi.x, wb_roi.y,
 			wb_roi.w, wb_roi.h);
 
-	/* bypass check if commit with no framebuffer */
 	fb = sde_wb_connector_state_get_output_fb(conn_state);
 	if (!fb) {
-		SDE_DEBUG("no output framebuffer\n");
-		return 0;
+		SDE_ERROR("no output framebuffer\n");
+		return -EINVAL;
 	}
 
 	SDE_DEBUG("[fb_id:%u][fb:%u,%u]\n", fb->base.id,
diff --git a/msm/sde/sde_hw_catalog.c b/msm/sde/sde_hw_catalog.c
index 8ebbcb4..2e8bb2a 100644
--- a/msm/sde/sde_hw_catalog.c
+++ b/msm/sde/sde_hw_catalog.c
@@ -3209,11 +3209,22 @@
 			of_fdt_get_ddrtype() == LP_DDR4_TYPE)
 		cfg->mdp[0].highest_bank_bit = 0x02;
 
+	cfg->mdp[0].ubwc_static = PROP_VALUE_ACCESS(prop_value, UBWC_STATIC, 0);
+	if (!prop_exists[UBWC_STATIC])
+		cfg->mdp[0].ubwc_static = DEFAULT_SDE_UBWC_STATIC;
+
 	if (IS_SDE_MAJOR_MINOR_SAME(cfg->hwversion, SDE_HW_VER_630)) {
 		ret = _sde_get_ubwc_hbb(prop_exists, prop_value);
 
-		if (ret >= 0)
+		if (ret >= 0) {
+			u32 ubwc_static, hbb;
+
 			cfg->mdp[0].highest_bank_bit = ret;
+			ubwc_static = cfg->mdp[0].ubwc_static;
+			hbb = ((cfg->mdp[0].highest_bank_bit & 0x7) << 4);
+			ubwc_static = ((ubwc_static & 0xff8f) | hbb);
+			cfg->mdp[0].ubwc_static = ubwc_static;
+		}
 	}
 
 	cfg->macrotile_mode = PROP_VALUE_ACCESS(prop_value, MACROTILE_MODE, 0);
@@ -3223,10 +3234,6 @@
 	cfg->ubwc_bw_calc_version =
 		PROP_VALUE_ACCESS(prop_value, UBWC_BW_CALC_VERSION, 0);
 
-	cfg->mdp[0].ubwc_static = PROP_VALUE_ACCESS(prop_value, UBWC_STATIC, 0);
-	if (!prop_exists[UBWC_STATIC])
-		cfg->mdp[0].ubwc_static = DEFAULT_SDE_UBWC_STATIC;
-
 	cfg->mdp[0].ubwc_swizzle = PROP_VALUE_ACCESS(prop_value,
 			UBWC_SWIZZLE, 0);
 	if (!prop_exists[UBWC_SWIZZLE])
@@ -4295,7 +4302,7 @@
 		sde_cfg->has_cwb_support = true;
 		sde_cfg->has_wb_ubwc = true;
 		sde_cfg->has_qsync = true;
-		sde_cfg->perf.min_prefill_lines = 24;
+		sde_cfg->perf.min_prefill_lines = 35;
 		sde_cfg->vbif_qos_nlvl = 8;
 		sde_cfg->ts_prefill_rev = 2;
 		sde_cfg->ctl_rev = SDE_CTL_CFG_VERSION_1_0_0;
@@ -4345,7 +4352,7 @@
 	} else if (IS_LAGOON_TARGET(hw_rev)) {
 		sde_cfg->has_cwb_support = true;
 		sde_cfg->has_qsync = true;
-		sde_cfg->perf.min_prefill_lines = 24;
+		sde_cfg->perf.min_prefill_lines = 35;
 		sde_cfg->vbif_qos_nlvl = 8;
 		sde_cfg->ts_prefill_rev = 2;
 		sde_cfg->ctl_rev = SDE_CTL_CFG_VERSION_1_0_0;
@@ -4359,6 +4366,7 @@
 		sde_cfg->has_hdr = true;
 		sde_cfg->has_vig_p010 = true;
 		sde_cfg->true_inline_rot_rev = SDE_INLINE_ROT_VERSION_2_0_0;
+		sde_cfg->has_3d_merge_reset = true;
 	} else if (IS_SCUBA_TARGET(hw_rev)) {
 		sde_cfg->has_cwb_support = false;
 		sde_cfg->has_qsync = true;
@@ -4372,6 +4380,7 @@
 		sde_cfg->sui_block_xin_mask = 0x1;
 		sde_cfg->has_hdr = false;
 		sde_cfg->has_sui_blendstage = true;
+		sde_cfg->allow_gdsc_toggle = true;
 		clear_bit(MDSS_INTR_AD4_0_INTR, sde_cfg->mdss_irqs);
 		clear_bit(MDSS_INTR_AD4_1_INTR, sde_cfg->mdss_irqs);
 	} else {
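
In the catalog change above, the highest bank bit read back for SDE 6.3.0 targets is folded into bits [6:4] of ubwc_static: the field is masked with 0xff8f to clear those bits and OR'd with (hbb & 0x7) << 4. A worked example of that bitfield merge; the register value used is made up.

/* Worked example of merging the highest bank bit into ubwc_static[6:4]. */
#include <stdint.h>
#include <stdio.h>

static uint32_t merge_hbb(uint32_t ubwc_static, uint32_t highest_bank_bit)
{
	uint32_t hbb = (highest_bank_bit & 0x7) << 4;

	return (ubwc_static & 0xff8f) | hbb;	/* clear [6:4], insert hbb */
}

int main(void)
{
	uint32_t before = 0x1e;		/* illustrative ubwc_static value */

	printf("0x%x -> 0x%x (hbb=2)\n", before, merge_hbb(before, 2));
	return 0;
}
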
diff --git a/msm/sde/sde_hw_color_proc_v4.c b/msm/sde/sde_hw_color_proc_v4.c
index f266683..19ccb25 100644
--- a/msm/sde/sde_hw_color_proc_v4.c
+++ b/msm/sde/sde_hw_color_proc_v4.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
  */
 #include <drm/msm_drm_pp.h>
 #include "sde_hw_color_proc_common_v4.h"
@@ -335,7 +335,11 @@
 	op_mode = SDE_REG_READ(&ctx->hw, offset);
 
 	if (!enable) {
-		op_mode &= ~BIT(0);
+		if (op_mode & BIT(1))
+			op_mode &= ~BIT(0);
+		else
+			op_mode = 0;
+
 		SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->ltm.base + 0x4,
 			(op_mode & 0x1FFFFFF));
 		return;
diff --git a/msm/sde/sde_hw_reg_dma_v1_color_proc.c b/msm/sde/sde_hw_reg_dma_v1_color_proc.c
index ae789c2..4d7a4d0 100644
--- a/msm/sde/sde_hw_reg_dma_v1_color_proc.c
+++ b/msm/sde/sde_hw_reg_dma_v1_color_proc.c
@@ -3586,16 +3586,10 @@
 	}
 }
 
-static void ltm_vlutv1_disable(struct sde_hw_dspp *ctx, void *cfg,
-		u32 num_mixers, enum sde_ltm *dspp_idx)
+static void ltm_vlutv1_disable(struct sde_hw_dspp *ctx)
 {
-	struct sde_hw_cp_cfg *hw_cfg = cfg;
-	struct sde_hw_reg_dma_ops *dma_ops;
-	struct sde_reg_dma_setup_ops_cfg dma_write_cfg;
-	struct sde_reg_dma_kickoff_cfg kick_off;
-	int rc, i = 0;
 	enum sde_ltm idx = 0;
-	u32 opmode = 0;
+	u32 opmode = 0, offset = 0;
 
 	idx = (enum sde_ltm)ctx->idx;
 	if (idx >= LTM_MAX) {
@@ -3603,40 +3597,15 @@
 		return;
 	}
 
-	dma_ops = sde_reg_dma_get_ops();
-	dma_ops->reset_reg_dma_buf(ltm_buf[LTM_VLUT][idx]);
-	REG_DMA_INIT_OPS(dma_write_cfg, ltm_mapping[idx], LTM_VLUT,
-			ltm_buf[LTM_VLUT][idx]);
-
-	for (i = 0; i < num_mixers; i++) {
-		dma_write_cfg.blk = ltm_mapping[dspp_idx[i]];
-		REG_DMA_SETUP_OPS(dma_write_cfg, 0, NULL, 0, HW_BLK_SELECT, 0,
-				0, 0);
-		rc = dma_ops->setup_payload(&dma_write_cfg);
-		if (rc) {
-			DRM_ERROR("write decode select failed ret %d\n", rc);
-			return;
-		}
-
-		ltm_vlut_ops_mask[dspp_idx[i]] &= ~ltm_vlut;
+	offset = ctx->cap->sblk->ltm.base + 0x4;
+	ltm_vlut_ops_mask[ctx->idx] &= ~ltm_vlut;
+	opmode = SDE_REG_READ(&ctx->hw, offset);
+	if (opmode & BIT(0))
 		/* disable VLUT/INIT/ROI */
-		REG_DMA_SETUP_OPS(dma_write_cfg, 0x04, &opmode, sizeof(opmode),
-			REG_SINGLE_MODIFY, 0, 0,
-			REG_DMA_LTM_VLUT_DISABLE_OP_MASK);
-		rc = dma_ops->setup_payload(&dma_write_cfg);
-		if (rc) {
-			DRM_ERROR("opmode write failed ret %d\n", rc);
-			return;
-		}
-	}
-
-	REG_DMA_SETUP_KICKOFF(kick_off, hw_cfg->ctl, ltm_buf[LTM_VLUT][idx],
-				REG_DMA_WRITE, DMA_CTL_QUEUE0, WRITE_IMMEDIATE);
-	rc = dma_ops->kick_off(&kick_off);
-	if (rc) {
-		DRM_ERROR("failed to kick off ret %d\n", rc);
-		return;
-	}
+		opmode &= REG_DMA_LTM_VLUT_DISABLE_OP_MASK;
+	else
+		opmode = 0;
+	SDE_REG_WRITE(&ctx->hw, offset, opmode);
 }
 
 void reg_dmav1_setup_ltm_vlutv1(struct sde_hw_dspp *ctx, void *cfg)
@@ -3656,6 +3625,13 @@
 	if (rc)
 		return;
 
+	/* disable case */
+	if (!hw_cfg->payload) {
+		DRM_DEBUG_DRIVER("Disable LTM vlut feature\n");
+		ltm_vlutv1_disable(ctx);
+		return;
+	}
+
 	idx = (enum sde_ltm)ctx->idx;
 	num_mixers = hw_cfg->num_of_mixers;
 	rc = reg_dmav1_get_ltm_blk(hw_cfg, idx, &dspp_idx[0], &blk);
@@ -3665,13 +3641,6 @@
 		return;
 	}
 
-	/* disable case */
-	if (!hw_cfg->payload) {
-		DRM_DEBUG_DRIVER("Disable LTM vlut feature\n");
-		ltm_vlutv1_disable(ctx, cfg, num_mixers, dspp_idx);
-		return;
-	}
-
 	if (hw_cfg->len != sizeof(struct drm_msm_ltm_data)) {
 		DRM_ERROR("invalid size of payload len %d exp %zd\n",
 				hw_cfg->len, sizeof(struct drm_msm_ltm_data));
diff --git a/msm/sde/sde_kms.c b/msm/sde/sde_kms.c
index 2980406..6eb5618 100644
--- a/msm/sde/sde_kms.c
+++ b/msm/sde/sde_kms.c
@@ -1274,7 +1274,7 @@
 {
 	struct drm_crtc *crtc;
 	struct drm_crtc_state *old_crtc_state;
-	int i, rc;
+	int i;
 
 	if (!kms || !old_state || !old_state->dev || !old_state->acquire_ctx) {
 		SDE_ERROR("invalid argument(s)\n");
@@ -1282,15 +1282,6 @@
 	}
 
 	SDE_ATRACE_BEGIN("sde_kms_prepare_fence");
-retry:
-	/* attempt to acquire ww mutex for connection */
-	rc = drm_modeset_lock(&old_state->dev->mode_config.connection_mutex,
-			       old_state->acquire_ctx);
-
-	if (rc == -EDEADLK) {
-		drm_modeset_backoff(old_state->acquire_ctx);
-		goto retry;
-	}
 
 	/* old_state actually contains updated crtc pointers */
 	for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
@@ -2828,9 +2819,10 @@
 	return rc;
 }
 
-static bool sde_kms_check_for_splash(struct msm_kms *kms)
+static bool sde_kms_check_for_splash(struct msm_kms *kms, struct drm_crtc *crtc)
 {
 	struct sde_kms *sde_kms;
+	struct drm_encoder *encoder;
 
 	if (!kms) {
 		SDE_ERROR("invalid kms\n");
@@ -2838,7 +2830,18 @@
 	}
 
 	sde_kms = to_sde_kms(kms);
-	return sde_kms->splash_data.num_splash_displays;
+
+	if (!crtc || !sde_kms->splash_data.num_splash_displays)
+		return !!sde_kms->splash_data.num_splash_displays;
+
+	drm_for_each_encoder_mask(encoder, crtc->dev,
+			crtc->state->encoder_mask) {
+		if (sde_encoder_in_cont_splash(encoder))
+			return true;
+	}
+
+	return false;
+
 }
 
 static int sde_kms_get_mixer_count(const struct msm_kms *kms,
diff --git a/msm/sde/sde_plane.c b/msm/sde/sde_plane.c
index 3062b7a..e834c5d 100644
--- a/msm/sde/sde_plane.c
+++ b/msm/sde/sde_plane.c
@@ -248,6 +248,7 @@
 	u32 frame_rate, qos_count, fps_index = 0, lut_index, index;
 	struct sde_perf_cfg *perf;
 	struct sde_plane_state *pstate;
+	struct sde_kms *kms;
 
 	if (!plane || !fb) {
 		SDE_ERROR("invalid arguments\n");
@@ -256,6 +257,11 @@
 
 	psde = to_sde_plane(plane);
 	pstate = to_sde_plane_state(plane->state);
+	kms = _sde_plane_get_kms(plane);
+	if (!kms) {
+		SDE_ERROR("invalid kms\n");
+		return;
+	}
 
 	if (!psde->pipe_hw || !psde->pipe_sblk || !psde->catalog) {
 		SDE_ERROR("invalid arguments\n");
@@ -282,7 +288,12 @@
 				fb->format->format,
 				fb->modifier);
 
-		if (fmt && SDE_FORMAT_IS_LINEAR(fmt))
+		if (fmt && SDE_FORMAT_IS_LINEAR(fmt) &&
+			pstate->scaler3_cfg.enable &&
+			IS_SDE_MAJOR_MINOR_SAME(kms->catalog->hwversion,
+							 SDE_HW_VER_640))
+			lut_index = SDE_QOS_LUT_USAGE_MACROTILE_QSEED;
+		else if (fmt && SDE_FORMAT_IS_LINEAR(fmt))
 			lut_index = SDE_QOS_LUT_USAGE_LINEAR;
 		else if (pstate->scaler3_cfg.enable)
 			lut_index = SDE_QOS_LUT_USAGE_MACROTILE_QSEED;
@@ -4118,6 +4129,7 @@
 	/* remove ref count for fence */
 	if (pstate->input_fence)
 		sde_sync_put(pstate->input_fence);
+	pstate->input_fence = 0;
 
 	/* destroy value helper */
 	msm_property_destroy_state(&psde->property_info, pstate,
diff --git a/msm/sde/sde_rm.c b/msm/sde/sde_rm.c
index 24c830c..7f3716b 100644
--- a/msm/sde/sde_rm.c
+++ b/msm/sde/sde_rm.c
@@ -33,6 +33,9 @@
 				(t).num_intf == (r).num_intf)
 #define IS_COMPATIBLE_PP_DSC(p, d) (p % 2 == d % 2)
 
+/* ~one vsync poll time for rsvp_nxt to be cleared by modeset from commit thread */
+#define RM_NXT_CLEAR_POLL_TIMEOUT_US 16600
+
 /**
  * topology information to be used when ctl path version does not
  * support driving more than one interface per ctl_path
@@ -57,12 +60,12 @@
 	{   SDE_RM_TOPOLOGY_NONE,                 0, 0, 0, 0, false },
 	{   SDE_RM_TOPOLOGY_SINGLEPIPE,           1, 0, 1, 1, false },
 	{   SDE_RM_TOPOLOGY_SINGLEPIPE_DSC,       1, 1, 1, 1, false },
-	{   SDE_RM_TOPOLOGY_DUALPIPE,             2, 0, 2, 1, true  },
-	{   SDE_RM_TOPOLOGY_DUALPIPE_DSC,         2, 2, 2, 1, true  },
+	{   SDE_RM_TOPOLOGY_DUALPIPE,             2, 0, 2, 1, false },
+	{   SDE_RM_TOPOLOGY_DUALPIPE_DSC,         2, 2, 2, 1, false },
 	{   SDE_RM_TOPOLOGY_DUALPIPE_3DMERGE,     2, 0, 1, 1, false },
 	{   SDE_RM_TOPOLOGY_DUALPIPE_3DMERGE_DSC, 2, 1, 1, 1, false },
 	{   SDE_RM_TOPOLOGY_DUALPIPE_DSCMERGE,    2, 2, 1, 1, false },
-	{   SDE_RM_TOPOLOGY_PPSPLIT,              1, 0, 2, 1, true  },
+	{   SDE_RM_TOPOLOGY_PPSPLIT,              1, 0, 2, 1, false },
 };
 
 
@@ -2103,6 +2106,30 @@
 	return ret;
 }
 
+/* call this only with rm->rm_lock held; it drops and re-takes the lock */
+struct sde_rm_rsvp *_sde_rm_poll_get_rsvp_nxt_locked(struct sde_rm *rm,
+		struct drm_encoder *enc)
+{
+	int i;
+	u32 loop_count = 20;
+	struct sde_rm_rsvp *rsvp_nxt = NULL;
+	u32 sleep = RM_NXT_CLEAR_POLL_TIMEOUT_US / loop_count;
+
+	for (i = 0; i < loop_count; i++) {
+		rsvp_nxt = _sde_rm_get_rsvp_nxt(rm, enc);
+		if (!rsvp_nxt)
+			return rsvp_nxt;
+
+		mutex_unlock(&rm->rm_lock);
+		SDE_DEBUG("iteration i:%d sleep range:%uus to %uus\n",
+				i, sleep, sleep * 2);
+		usleep_range(sleep, sleep * 2);
+		mutex_lock(&rm->rm_lock);
+	}
+
+	return rsvp_nxt;
+}
+
 int sde_rm_reserve(
 		struct sde_rm *rm,
 		struct drm_encoder *enc,
@@ -2154,16 +2181,19 @@
 	 * commit rsvps. This rsvp_nxt can be cleared by a back to back
 	 * check_only commit with modeset when its predecessor atomic
 	 * commit is delayed / not committed the reservation yet.
-	 * Bail out in such cases so that check only commit
-	 * comes again after earlier commit gets processed.
+	 * Poll for rsvp_nxt to clear; allow the check_only commit if it is
+	 * cleared in time, and bail out if it is not cleared before the timeout.
 	 */
-
 	if (test_only && rsvp_cur && rsvp_nxt) {
-		SDE_ERROR("cur %d nxt %d enc %d conn %d\n", rsvp_cur->seq,
-			 rsvp_nxt->seq, enc->base.id,
-			 conn_state->connector->base.id);
-		ret = -EINVAL;
-		goto end;
+		rsvp_nxt = _sde_rm_poll_get_rsvp_nxt_locked(rm, enc);
+		if (rsvp_nxt) {
+			SDE_ERROR("poll timeout cur %d nxt %d enc %d\n",
+				rsvp_cur->seq, rsvp_nxt->seq, enc->base.id);
+			SDE_EVT32(rsvp_cur->seq, rsvp_nxt->seq,
+					 enc->base.id, SDE_EVTLOG_ERROR);
+			ret = -EINVAL;
+			goto end;
+		}
 	}
 
 	if (!test_only && rsvp_nxt)
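
The new _sde_rm_poll_get_rsvp_nxt_locked() above polls for roughly one vsync (RM_NXT_CLEAR_POLL_TIMEOUT_US split across 20 iterations) for rsvp_nxt to be cleared, and it has to drop rm_lock around each sleep so the commit thread can actually take the lock and clear the reservation. A userspace analogue of that unlock-sleep-relock loop with pthreads; the predicate and timings below are placeholders.

/* Analogue of poll-under-lock: release the lock while sleeping so the other
 * thread can make progress, then re-take it and re-check the predicate. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define POLL_TIMEOUT_US 16600	/* ~one vsync, as in the driver */
#define LOOP_COUNT 20

static pthread_mutex_t rm_lock = PTHREAD_MUTEX_INITIALIZER;
static bool rsvp_nxt_pending = true;	/* placeholder predicate */

/* call with rm_lock held; returns true if the reservation cleared in time */
static bool poll_rsvp_nxt_clear_locked(void)
{
	unsigned int sleep_us = POLL_TIMEOUT_US / LOOP_COUNT;

	for (int i = 0; i < LOOP_COUNT; i++) {
		if (!rsvp_nxt_pending)
			return true;

		pthread_mutex_unlock(&rm_lock);
		usleep(sleep_us);
		pthread_mutex_lock(&rm_lock);
	}
	return !rsvp_nxt_pending;
}

int main(void)
{
	pthread_mutex_lock(&rm_lock);
	printf("cleared=%d\n", poll_rsvp_nxt_clear_locked());
	pthread_mutex_unlock(&rm_lock);
	return 0;
}
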
diff --git a/msm/sde_rsc_hw_v3.c b/msm/sde_rsc_hw_v3.c
index d3a589c..5a5a692 100644
--- a/msm/sde_rsc_hw_v3.c
+++ b/msm/sde_rsc_hw_v3.c
@@ -106,17 +106,17 @@
 
 	/* Mode - 2 sequence */
 	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x18,
-						0xbdf9b9a0, rsc->debug_mode);
+						0xf9b9baa0, rsc->debug_mode);
 	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x1c,
-						0xa13899fe, rsc->debug_mode);
+						0x999afebd, rsc->debug_mode);
 	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x20,
-						0xe0ac81e1, rsc->debug_mode);
+						0x81e1a138, rsc->debug_mode);
 	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x24,
-						0x3982e2a2, rsc->debug_mode);
+						0xe2a2e0ac, rsc->debug_mode);
 	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x28,
-						0x208cfd9d, rsc->debug_mode);
+						0xfd9d3982, rsc->debug_mode);
 	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x2c,
-						0x20202020, rsc->debug_mode);
+						0x2020208c, rsc->debug_mode);
 	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x30,
 						0x20202020, rsc->debug_mode);
 
diff --git a/pll/dsi_pll_7nm.c b/pll/dsi_pll_7nm.c
index c3a8ff2..581fa3b 100644
--- a/pll/dsi_pll_7nm.c
+++ b/pll/dsi_pll_7nm.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2020, The Linux Foundation. All rights reserved.
  */
 
 #define pr_fmt(fmt)	"%s: " fmt, __func__
@@ -426,6 +426,12 @@
 	int rc = 0;
 	struct mdss_pll_resources *rsc = context;
 
+	/* Return cached cfg1; cfg1 is programmed from this cache in pll_enable */
+	if (!rsc->handoff_resources) {
+		*val = (rsc->cached_cfg1) & 0x3;
+		return rc;
+	}
+
 	rc = mdss_pll_resource_enable(rsc, true);
 	if (rc)
 		pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
@@ -1130,7 +1136,8 @@
 	MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL6,
 			   (PLL_CMODE_1 + offset),
 			   (PLL_CLOCK_INVERTERS_1 + offset),
-			   0x10, reg->pll_clock_inverters);
+			    pll->cphy_enabled ? 0x00 : 0x10,
+			    reg->pll_clock_inverters);
 	upper_addr |=
 		(upper_8_bit(PLL_CMODE_1 + offset) << 12);
 	upper_addr |= (upper_8_bit(PLL_CLOCK_INVERTERS_1 + offset) << 13);
@@ -1610,6 +1617,11 @@
 		return 0;
 	}
 
+	if (!pll->priv) {
+		pr_err("pll priv is null\n");
+		return 0;
+	}
+
 	/*
 	 * In the case when vco rate is set, the recalculation function should
 	 * return the current rate as to avoid trying to set the vco rate
@@ -1629,6 +1641,7 @@
 	}
 
 	pll->handoff_resources = true;
+	dsi_pll_detect_phy_mode(pll->priv, pll);
 	if (dsi_pll_7nm_lock_status(pll)) {
 		pr_debug("PLL not enabled\n");
 		pll->handoff_resources = false;
@@ -1837,25 +1850,25 @@
  *      |  DIV(8)   |  |  DIV(7)   |   |  |   DIV (2)   | | |   DIV(4)   |  |  DIV(3.5)  |
  *      +-----+-----+  +-----+-----+   |  +------+------+ | +-----+------+  +------+-----+
  *            |              |         |         |        |       |                |
- *Shadow Path |          CPHY Path     |         |        |       |           +----v
- *     +      |              |         +------+  |        |   +---+           |
- *     +---+  |        +-----+                |  |        |   |               |
- *         |  |        |                    +-v--v----v---v---+      +--------v--------+
- *     +---v--v--------v---+                 \  pclk_src_mux /        \ cphy_pclk_src /
+ *Shadow DPHY | Shadow   CPHY Path     |         |        |       |           +----v
+ *    Path    | CPHY Path    |         +------+  |        |   +---+           |
+ *     +---+  |    |   +-----+                |  |        |   |               |
+ *         |  |    |   |                    +-v--v----v---v---+      +--------v--------+
+ *     +---v--v----v---v---+                 \  pclk_src_mux /        \ cphy_pclk_src /
  *      \   byteclk_mux   /                   \             /          \     mux     /
  *       \               /                     +-----+-----+            +-----+-----+
- *        +------+------+                            |      Shadow Path       |
- *               |                                   |           +            |
+ *        +------+------+                            |         Shadow         |
+ *               |                                   |        DPHY Path       |
  *               v                             +-----v------+    |     +------v------+
  *         dsi_byte_clk                        |  pclk_src  |    |     |cphy_pclk_src|
  *                                             | DIV(1..15) |    |     |  DIV(1..15) |
  *                                             +-----+------+    |     +------+------+
  *                                                   |           |            |
  *                                                   |           |        CPHY Path
- *                                                   |           |            |
- *                                                   +-------+   |    +-------+
- *                                                           |   |    |
- *                                                       +---v---v----v------+
+ *                                                   |           |            |     Shadow CPHY Path
+ *                                                   +-------+   |    +-------+           |
+ *                                                           |   |    |   |----------------
+ *                                                       +---v---v----v---v--+
  *                                                        \     pclk_mux    /
  *                                                          +------+------+
  *                                                                 |
@@ -2084,6 +2097,17 @@
 	},
 };
 
+static struct clk_fixed_factor dsi0pll_shadow_post_vco_div3_5 = {
+	.div = 7,
+	.mult = 2,
+	.hw.init = &(struct clk_init_data){
+		.name = "dsi0pll_shadow_post_vco_div3_5",
+		.parent_names = (const char *[]){"dsi0pll_shadow_pll_out_div"},
+		.num_parents = 1,
+		.ops = &clk_fixed_factor_ops,
+	},
+};
+
 static struct clk_fixed_factor dsi1pll_post_vco_div3_5 = {
 	.div = 7,
 	.mult = 2,
@@ -2095,6 +2119,17 @@
 	},
 };
 
+static struct clk_fixed_factor dsi1pll_shadow_post_vco_div3_5 = {
+	.div = 7,
+	.mult = 2,
+	.hw.init = &(struct clk_init_data){
+		.name = "dsi1pll_shadow_post_vco_div3_5",
+		.parent_names = (const char *[]){"dsi1pll_shadow_pll_out_div"},
+		.num_parents = 1,
+		.ops = &clk_fixed_factor_ops,
+	},
+};
+
 static struct clk_fixed_factor dsi1pll_shadow_post_vco_div = {
 	.div = 4,
 	.mult = 1,
@@ -2154,6 +2189,18 @@
 	},
 };
 
+static struct clk_fixed_factor dsi0pll_shadow_cphy_byteclk_src = {
+	.div = 7,
+	.mult = 1,
+	.hw.init = &(struct clk_init_data){
+		.name = "dsi0pll_shadow_cphy_byteclk_src",
+		.parent_names = (const char *[]){"dsi0pll_shadow_bitclk_src"},
+		.num_parents = 1,
+		.flags = (CLK_SET_RATE_PARENT),
+		.ops = &clk_fixed_factor_ops,
+	},
+};
+
 static struct clk_fixed_factor dsi1pll_cphy_byteclk_src = {
 	.div = 7,
 	.mult = 1,
@@ -2166,6 +2213,18 @@
 	},
 };
 
+static struct clk_fixed_factor dsi1pll_shadow_cphy_byteclk_src = {
+	.div = 7,
+	.mult = 1,
+	.hw.init = &(struct clk_init_data){
+		.name = "dsi1pll_shadow_cphy_byteclk_src",
+		.parent_names = (const char *[]){"dsi1pll_shadow_bitclk_src"},
+		.num_parents = 1,
+		.flags = (CLK_SET_RATE_PARENT),
+		.ops = &clk_fixed_factor_ops,
+	},
+};
+
 static struct clk_fixed_factor dsi1pll_shadow_byteclk_src = {
 	.div = 8,
 	.mult = 1,
@@ -2230,8 +2289,9 @@
 			.name = "dsi0_phy_pll_out_byteclk",
 			.parent_names = (const char *[]){"dsi0pll_byteclk_src",
 				"dsi0pll_shadow_byteclk_src",
-				"dsi0pll_cphy_byteclk_src"},
-			.num_parents = 3,
+				"dsi0pll_cphy_byteclk_src",
+				"dsi0pll_shadow_cphy_byteclk_src"},
+			.num_parents = 4,
 			.flags = (CLK_SET_RATE_PARENT |
 					CLK_SET_RATE_NO_REPARENT),
 			.ops = &clk_regmap_mux_closest_ops,
@@ -2247,8 +2307,9 @@
 			.name = "dsi1_phy_pll_out_byteclk",
 			.parent_names = (const char *[]){"dsi1pll_byteclk_src",
 				"dsi1pll_shadow_byteclk_src",
-				"dsi1pll_cphy_byteclk_src"},
-			.num_parents = 3,
+				"dsi1pll_cphy_byteclk_src",
+				"dsi1pll_shadow_cphy_byteclk_src"},
+			.num_parents = 4,
 			.flags = (CLK_SET_RATE_PARENT |
 					CLK_SET_RATE_NO_REPARENT),
 			.ops = &clk_regmap_mux_closest_ops,
@@ -2302,6 +2363,22 @@
 	},
 };
 
+static struct clk_regmap_mux dsi0pll_shadow_cphy_pclk_src_mux = {
+	.reg = PHY_CMN_CLK_CFG1,
+	.shift = 0,
+	.width = 2,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi0pll_shadow_cphy_pclk_src_mux",
+			.parent_names =
+			(const char *[]){
+				"dsi0pll_shadow_post_vco_div3_5"},
+			.num_parents = 1,
+			.ops = &clk_regmap_mux_closest_ops,
+		},
+	},
+};
+
 static struct clk_regmap_mux dsi1pll_pclk_src_mux = {
 	.reg = PHY_CMN_CLK_CFG1,
 	.shift = 0,
@@ -2348,6 +2425,22 @@
 	},
 };
 
+static struct clk_regmap_mux dsi1pll_shadow_cphy_pclk_src_mux = {
+	.reg = PHY_CMN_CLK_CFG1,
+	.shift = 0,
+	.width = 2,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi1pll_shadow_cphy_pclk_src_mux",
+			.parent_names =
+			(const char *[]){
+				"dsi1pll_shadow_post_vco_div3_5"},
+			.num_parents = 1,
+			.ops = &clk_regmap_mux_closest_ops,
+		},
+	},
+};
+
 static struct clk_regmap_div dsi0pll_pclk_src = {
 	.shift = 0,
 	.width = 4,
@@ -2396,6 +2489,22 @@
 	},
 };
 
+static struct clk_regmap_div dsi0pll_shadow_cphy_pclk_src = {
+	.shift = 0,
+	.width = 4,
+	.flags = CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi0pll_shadow_cphy_pclk_src",
+			.parent_names = (const char *[]){
+				"dsi0pll_shadow_cphy_pclk_src_mux"},
+			.num_parents = 1,
+			.flags = (CLK_SET_RATE_PARENT),
+			.ops = &clk_regmap_div_ops,
+		},
+	},
+};
+
 static struct clk_regmap_div dsi1pll_pclk_src = {
 	.shift = 0,
 	.width = 4,
@@ -2444,6 +2553,22 @@
 	},
 };
 
+static struct clk_regmap_div dsi1pll_shadow_cphy_pclk_src = {
+	.shift = 0,
+	.width = 4,
+	.flags = CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi1pll_shadow_cphy_pclk_src",
+			.parent_names = (const char *[]){
+				"dsi1pll_shadow_cphy_pclk_src_mux"},
+			.num_parents = 1,
+			.flags = (CLK_SET_RATE_PARENT),
+			.ops = &clk_regmap_div_ops,
+		},
+	},
+};
+
 static struct clk_regmap_mux dsi0pll_pclk_mux = {
 	.shift = 0,
 	.width = 1,
@@ -2452,8 +2577,9 @@
 			.name = "dsi0_phy_pll_out_dsiclk",
 			.parent_names = (const char *[]){"dsi0pll_pclk_src",
 				"dsi0pll_shadow_pclk_src",
-				"dsi0pll_cphy_pclk_src"},
-			.num_parents = 3,
+				"dsi0pll_cphy_pclk_src",
+				"dsi0pll_shadow_cphy_pclk_src"},
+			.num_parents = 4,
 			.flags = (CLK_SET_RATE_PARENT |
 					CLK_SET_RATE_NO_REPARENT),
 			.ops = &clk_regmap_mux_closest_ops,
@@ -2469,8 +2595,9 @@
 			.name = "dsi1_phy_pll_out_dsiclk",
 			.parent_names = (const char *[]){"dsi1pll_pclk_src",
 				"dsi1pll_shadow_pclk_src",
-				"dsi1pll_cphy_pclk_src"},
-			.num_parents = 3,
+				"dsi1pll_cphy_pclk_src",
+				"dsi1pll_shadow_cphy_pclk_src"},
+			.num_parents = 4,
 			.flags = (CLK_SET_RATE_PARENT |
 					CLK_SET_RATE_NO_REPARENT),
 			.ops = &clk_regmap_mux_closest_ops,
@@ -2497,10 +2624,15 @@
 	[SHADOW_PLL_OUT_DIV_0_CLK] = &dsi0pll_shadow_pll_out_div.clkr.hw,
 	[SHADOW_BITCLK_SRC_0_CLK] = &dsi0pll_shadow_bitclk_src.clkr.hw,
 	[SHADOW_BYTECLK_SRC_0_CLK] = &dsi0pll_shadow_byteclk_src.hw,
+	[SHADOW_CPHY_BYTECLK_SRC_0_CLK] = &dsi0pll_shadow_cphy_byteclk_src.hw,
 	[SHADOW_POST_BIT_DIV_0_CLK] = &dsi0pll_shadow_post_bit_div.hw,
 	[SHADOW_POST_VCO_DIV_0_CLK] = &dsi0pll_shadow_post_vco_div.hw,
+	[SHADOW_POST_VCO_DIV3_5_0_CLK] = &dsi0pll_shadow_post_vco_div3_5.hw,
 	[SHADOW_PCLK_SRC_MUX_0_CLK] = &dsi0pll_shadow_pclk_src_mux.clkr.hw,
 	[SHADOW_PCLK_SRC_0_CLK] = &dsi0pll_shadow_pclk_src.clkr.hw,
+	[SHADOW_CPHY_PCLK_SRC_MUX_0_CLK] =
+			&dsi0pll_shadow_cphy_pclk_src_mux.clkr.hw,
+	[SHADOW_CPHY_PCLK_SRC_0_CLK] = &dsi0pll_shadow_cphy_pclk_src.clkr.hw,
 	[VCO_CLK_1] = &dsi1pll_vco_clk.hw,
 	[PLL_OUT_DIV_1_CLK] = &dsi1pll_pll_out_div.clkr.hw,
 	[BITCLK_SRC_1_CLK] = &dsi1pll_bitclk_src.clkr.hw,
@@ -2519,10 +2651,15 @@
 	[SHADOW_PLL_OUT_DIV_1_CLK] = &dsi1pll_shadow_pll_out_div.clkr.hw,
 	[SHADOW_BITCLK_SRC_1_CLK] = &dsi1pll_shadow_bitclk_src.clkr.hw,
 	[SHADOW_BYTECLK_SRC_1_CLK] = &dsi1pll_shadow_byteclk_src.hw,
+	[SHADOW_CPHY_BYTECLK_SRC_1_CLK] = &dsi1pll_shadow_cphy_byteclk_src.hw,
 	[SHADOW_POST_BIT_DIV_1_CLK] = &dsi1pll_shadow_post_bit_div.hw,
 	[SHADOW_POST_VCO_DIV_1_CLK] = &dsi1pll_shadow_post_vco_div.hw,
+	[SHADOW_POST_VCO_DIV3_5_1_CLK] = &dsi1pll_shadow_post_vco_div3_5.hw,
 	[SHADOW_PCLK_SRC_MUX_1_CLK] = &dsi1pll_shadow_pclk_src_mux.clkr.hw,
 	[SHADOW_PCLK_SRC_1_CLK] = &dsi1pll_shadow_pclk_src.clkr.hw,
+	[SHADOW_CPHY_PCLK_SRC_MUX_1_CLK] =
+			&dsi1pll_shadow_cphy_pclk_src_mux.clkr.hw,
+	[SHADOW_CPHY_PCLK_SRC_1_CLK] = &dsi1pll_shadow_cphy_pclk_src.clkr.hw,
 };
 
 int dsi_pll_clock_register_7nm(struct platform_device *pdev,
@@ -2581,6 +2718,7 @@
 		dsi0pll_pclk_src.clkr.regmap = rmap;
 		dsi0pll_cphy_pclk_src.clkr.regmap = rmap;
 		dsi0pll_shadow_pclk_src.clkr.regmap = rmap;
+		dsi0pll_shadow_cphy_pclk_src.clkr.regmap = rmap;
 
 		rmap = devm_regmap_init(&pdev->dev, &mdss_mux_regmap_bus,
 				pll_res, &dsi_pll_7nm_config);
@@ -2594,6 +2732,7 @@
 					&cphy_pclk_src_mux_regmap_bus,
 					pll_res, &dsi_pll_7nm_config);
 		dsi0pll_cphy_pclk_src_mux.clkr.regmap = rmap;
+		dsi0pll_shadow_cphy_pclk_src_mux.clkr.regmap = rmap;
 
 		rmap = devm_regmap_init(&pdev->dev, &mdss_mux_regmap_bus,
 				pll_res, &dsi_pll_7nm_config);
@@ -2609,7 +2748,7 @@
 			dsi0pll_shadow_vco_clk.max_rate = 5000000000;
 		}
 
-		for (i = VCO_CLK_0; i <= CPHY_PCLK_SRC_0_CLK; i++) {
+		for (i = VCO_CLK_0; i <= SHADOW_CPHY_PCLK_SRC_0_CLK; i++) {
 			clk = devm_clk_register(&pdev->dev,
 						mdss_dsi_pllcc_7nm[i]);
 			if (IS_ERR(clk)) {
@@ -2640,6 +2779,7 @@
 		dsi1pll_pclk_src.clkr.regmap = rmap;
 		dsi1pll_cphy_pclk_src.clkr.regmap = rmap;
 		dsi1pll_shadow_pclk_src.clkr.regmap = rmap;
+		dsi1pll_shadow_cphy_pclk_src.clkr.regmap = rmap;
 
 		rmap = devm_regmap_init(&pdev->dev, &mdss_mux_regmap_bus,
 				pll_res, &dsi_pll_7nm_config);
@@ -2653,6 +2793,7 @@
 					&cphy_pclk_src_mux_regmap_bus,
 					pll_res, &dsi_pll_7nm_config);
 		dsi1pll_cphy_pclk_src_mux.clkr.regmap = rmap;
+		dsi1pll_shadow_cphy_pclk_src_mux.clkr.regmap = rmap;
 
 		rmap = devm_regmap_init(&pdev->dev, &mdss_mux_regmap_bus,
 				pll_res, &dsi_pll_7nm_config);
@@ -2667,7 +2808,7 @@
 			dsi1pll_shadow_vco_clk.max_rate = 5000000000;
 		}
 
-		for (i = VCO_CLK_1; i <= CPHY_PCLK_SRC_1_CLK; i++) {
+		for (i = VCO_CLK_1; i <= SHADOW_CPHY_PCLK_SRC_1_CLK; i++) {
 			clk = devm_clk_register(&pdev->dev,
 						mdss_dsi_pllcc_7nm[i]);
 			if (IS_ERR(clk)) {
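
The PLL additions above give the shadow C-PHY path its own byte and pixel clock sources. Note how the 3.5 pixel divider is expressed as a clk_fixed_factor with div = 7 and mult = 2, since fixed-factor clocks only take integer ratios: f_out = f_in * 2 / 7 = f_in / 3.5. The C-PHY byte clock sources likewise divide by 7 where the D-PHY shadow byte clock divides by 8. A quick check of that arithmetic with an illustrative parent rate:

/* Sanity check of the fixed-factor ratios used above. */
#include <stdio.h>

static unsigned long fixed_factor(unsigned long parent_hz,
				  unsigned int mult, unsigned int div)
{
	return (unsigned long)(((unsigned long long)parent_hz * mult) / div);
}

int main(void)
{
	unsigned long pll_out = 3500000000UL;	/* illustrative 3.5 GHz */

	/* post_vco_div3_5: mult=2, div=7  ->  parent / 3.5 */
	printf("div3.5:  %lu\n", fixed_factor(pll_out, 2, 7));
	/* cphy byteclk src: mult=1, div=7 */
	printf("byteclk: %lu\n", fixed_factor(pll_out, 1, 7));
	return 0;
}
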
diff --git a/rotator/sde_rotator_core.c b/rotator/sde_rotator_core.c
index 0febbd6..e7f72fd 100644
--- a/rotator/sde_rotator_core.c
+++ b/rotator/sde_rotator_core.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2020, The Linux Foundation. All rights reserved.
  */
 
 #define pr_fmt(fmt)	"%s:%d: " fmt, __func__, __LINE__
@@ -79,6 +79,10 @@
 
 #define BUS_VOTE_19_MHZ 153600000
 
+#define ROT_HAS_UBWC(caps) (test_bit(SDE_CAPS_UBWC_2, caps) ||\
+		test_bit(SDE_CAPS_UBWC_3, caps) ||\
+		test_bit(SDE_CAPS_UBWC_4, caps))
+
 /* forward prototype */
 static int sde_rotator_update_perf(struct sde_rot_mgr *mgr);
 
@@ -610,7 +614,7 @@
 
 			sid_info = (uint32_t *) shm.vaddr;
 			desc.args[1] = shm.paddr;
-			desc.args[2] = shm.size;
+			desc.args[2] = sizeof(uint32_t);
 		} else {
 			sid_info = kzalloc(sizeof(uint32_t), GFP_KERNEL);
 			if (!sid_info)
@@ -2033,14 +2037,25 @@
 static int sde_rotator_validate_fmt_and_item_flags(
 	struct sde_rotation_config *config, struct sde_rotation_item *item)
 {
-	struct sde_mdp_format_params *fmt;
+	struct sde_mdp_format_params *in_fmt, *out_fmt;
+	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
+	bool has_ubwc;
 
-	fmt = sde_get_format_params(item->input.format);
+	in_fmt = sde_get_format_params(item->input.format);
+	out_fmt = sde_get_format_params(item->output.format);
 	if ((item->flags & SDE_ROTATION_DEINTERLACE) &&
-			sde_mdp_is_ubwc_format(fmt)) {
+			sde_mdp_is_ubwc_format(in_fmt)) {
 		SDEROT_DBG("cannot perform deinterlace on tiled formats\n");
 		return -EINVAL;
 	}
+
+	has_ubwc = ROT_HAS_UBWC(mdata->sde_caps_map);
+	if (!has_ubwc && (sde_mdp_is_ubwc_format(in_fmt) ||
+		sde_mdp_is_ubwc_format(out_fmt))) {
+		SDEROT_ERR("ubwc format is not supported\n");
+		return -EINVAL;
+	}
+
 	return 0;
 }
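
The rotator change above rejects UBWC input or output formats outright when none of the UBWC capability bits are set in sde_caps_map, instead of only rejecting the deinterlace-plus-UBWC combination. Below is a reduced sketch of that gate using a plain bitmask in place of the kernel's bitmap-based caps map; the enum names are stand-ins for the SDE_CAPS_UBWC_* bits.

/* Reduced model of the rotator UBWC gate: reject UBWC formats when the
 * hardware capability bitmap has no UBWC revision bit set. */
#include <stdbool.h>
#include <stdio.h>

enum { CAP_UBWC_2, CAP_UBWC_3, CAP_UBWC_4 };	/* stand-in capability bits */

#define HAS_UBWC(caps) ((caps) & ((1u << CAP_UBWC_2) | \
				  (1u << CAP_UBWC_3) | \
				  (1u << CAP_UBWC_4)))

static int validate_formats(unsigned int caps, bool in_ubwc, bool out_ubwc)
{
	if (!HAS_UBWC(caps) && (in_ubwc || out_ubwc))
		return -1;	/* -EINVAL in the driver */
	return 0;
}

int main(void)
{
	printf("no-ubwc hw, ubwc src: %d\n", validate_formats(0, true, false));
	printf("ubwc3 hw,   ubwc src: %d\n",
	       validate_formats(1u << CAP_UBWC_3, true, false));
	return 0;
}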