Merge "msm: vidc: Check rate control while validating HP"
diff --git a/Documentation/devicetree/bindings/arm/msm/imem.txt b/Documentation/devicetree/bindings/arm/msm/imem.txt
index 654f26e..440628d 100644
--- a/Documentation/devicetree/bindings/arm/msm/imem.txt
+++ b/Documentation/devicetree/bindings/arm/msm/imem.txt
@@ -46,6 +46,12 @@
 -compatible: "qcom,msm-imem-restart_reason
 -reg: start address and size of restart_reason region in imem
 
+Download Mode Type:
+-------------------
+Required properties:
+-compatible: "qcom,msm-imem-dload-type"
+-reg: start address and size of dload type region in imem
+
 Download Mode:
 --------------
 Required properties:
diff --git a/Documentation/devicetree/bindings/arm/msm/qcom,llcc.txt b/Documentation/devicetree/bindings/arm/msm/qcom,llcc.txt
index 628b2aa..90bc368 100644
--- a/Documentation/devicetree/bindings/arm/msm/qcom,llcc.txt
+++ b/Documentation/devicetree/bindings/arm/msm/qcom,llcc.txt
@@ -31,6 +31,9 @@
 4. LLCC AMON Driver:
 Keeps track of the data progress within the internal channels of LLCC.
 
+5. LLCC Performance Monitor
+Used to monitor the events of LLCC sub blocks.
+
 == llcc device ==
 
 Require Properties:
@@ -107,6 +110,10 @@
 			compatible = "qcom,llcc-amon";
 			qcom,fg-cnt = <0x7>;
 		};
+
+		qcom,llcc-perfmon {
+			compatible = "qcom,llcc-perfmon";
+		};
 	};
 
 == Client ==
diff --git a/Documentation/devicetree/bindings/crypto/msm/qcedev.txt b/Documentation/devicetree/bindings/crypto/msm/qcedev.txt
index 051b315..6f1d8e3 100644
--- a/Documentation/devicetree/bindings/crypto/msm/qcedev.txt
+++ b/Documentation/devicetree/bindings/crypto/msm/qcedev.txt
@@ -20,7 +20,7 @@
   - qcom,ce-hw-key : optional, indicates if the hardware supports use of HW KEY.
   - qcom,support-core-clk-only : optional, indicates if the HW supports single crypto core clk.
   - qcom,bsm-ee : optional, indicate the BAM EE value, changes from target to target. Default value is 1 if not specified.
-  - qcom,smmu-s1-bypass : Boolean flag to bypass SMMU stage 1 translation.
+  - qcom,smmu-s1-enable : Boolean flag to enable SMMU stage 1 translation.
   - iommus : A list of phandle and IOMMU specifier pairs that describe the IOMMU master interfaces of the device.
 
 Example:
diff --git a/Documentation/devicetree/bindings/crypto/msm/qcrypto.txt b/Documentation/devicetree/bindings/crypto/msm/qcrypto.txt
index fa27198..231f31a 100644
--- a/Documentation/devicetree/bindings/crypto/msm/qcrypto.txt
+++ b/Documentation/devicetree/bindings/crypto/msm/qcrypto.txt
@@ -40,7 +40,7 @@
 	required. For other targets such as fsm, they do not perform
 	bus scaling. It is not required for those targets.
 
-  - qcom,smmu-s1-bypass : Boolean flag to bypass SMMU stage 1 translation.
+  - qcom,smmu-s1-enable : Boolean flag to enable SMMU stage 1 translation.
   - iommus : A list of phandle and IOMMU specifier pairs that describe the IOMMU master interfaces of the device.
 
 Example:
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,sdm845-pinctrl b/Documentation/devicetree/bindings/pinctrl/qcom,sdm845-pinctrl
index 9c26374..7e75d2c 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,sdm845-pinctrl
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,sdm845-pinctrl
@@ -6,7 +6,7 @@
 - compatible:
 	Usage: required
 	Value type: <string>
-	Definition: must be "qcom,sdm845-pinctrl"
+	Definition: must be "qcom,sdm845-pinctrl" or "qcom,sdm845-pinctrl-v2"
 
 - reg:
 	Usage: required
diff --git a/Documentation/devicetree/bindings/serial/msm_serial_hs.txt b/Documentation/devicetree/bindings/serial/msm_serial_hs.txt
new file mode 100644
index 0000000..031be45
--- /dev/null
+++ b/Documentation/devicetree/bindings/serial/msm_serial_hs.txt
@@ -0,0 +1,121 @@
+* Qualcomm MSM HSUART
+
+Required properties:
+- compatible :
+	- "qcom,msm-hsuart-v14" to be used for UARTDM Core v1.4
+- reg : offset and length of the register set for both the device,
+	uart core and bam core
+- reg-names :
+	- "core_mem" to be used as name of the uart core
+	- "bam_mem" to be used as name of the bam core
+- interrupts : interrupts for both the device,uart core and bam core
+- interrupt-names :
+	- "core_irq" to be used as uart irq
+	- "bam_irq" to be used as bam irq
+- #interrupt-cells: Specifies the number of cells needed to encode an interrupt
+		source. The type shall be a <u32> and the value shall be 1
+- #address-cells: Specifies the number of cells needed to encode an address.
+		The type shall be <u32> and the value shall be 0
+- interrupt-parent : It is needed for interrupt mapping
+- bam-tx-ep-pipe-index : BAM TX Endpoint Pipe Index for HSUART
+- bam-rx-ep-pipe-index : BAM RX Endpoint Pipe Index for HSUART
+
+BLSP has a static pipe allocation and assumes a pair-pipe for each uart core.
+Pipes [2*i : 2*i+1] are allocated for UART cores where i = [0 : 5].
+Hence, Minimum and Maximum permitted value of endpoint pipe index to be used
+with uart core is 0 and 11 respectively.
+
+There is one HSUART block used in MSM devices,
+"qcom,msm-hsuart-v14". The msm-serial-hs driver is
+able to handle this, and matches against the "qcom,msm-hsuart-v14"
+as the compatibility.
+
+The registers for the "qcom,msm-hsuart-v14" device need to specify both
+register blocks - uart core and bam core.
+
+Example:
+
+	uart7: uart@f995d000 {
+		compatible = "qcom,msm-hsuart-v14";
+		reg = <0xf995d000 0x1000>,
+		      <0xf9944000 0x5000>;
+		reg-names = "core_mem", "bam_mem";
+		interrupt-names = "core_irq", "bam_irq";
+		#address-cells = <0>;
+		interrupt-parent = <&uart7>;
+		interrupts = <0 1>;
+		#interrupt-cells = <1>;
+		interrupt-map = <0 &intc 0 113 0
+				1 &intc 0 239 0>;
+		qcom,bam-tx-ep-pipe-index = <0>;
+		qcom,bam-rx-ep-pipe-index = <1>;
+	};
+
+Optional properties:
+- qcom,<gpio-name>-gpio : handle to the GPIO node, see "gpios property" in
+Documentation/devicetree/bindings/gpio/gpio.txt.
+"gpio-name" can be "tx", "rx", "cts" and "rfr" based on number of UART GPIOs
+need to configured.
+GPIOs are optional if they are not required to be configured by the UART
+driver, or in the case where nothing is connected and the internal
+loopback mode is to be used for the uart.
+- qcom,wakeup_irq : UART RX GPIO IRQ line to be configured as wakeup source.
+- qcom,inject-rx-on-wakeup : enables feature where, on receiving an interrupt
+on the UART RX GPIO IRQ line (i.e. the above qcom,wakeup_irq property), the
+HSUART driver injects the character given by qcom,rx-char-to-inject.
+- qcom,rx-char-to-inject : The character to be inserted on wakeup.
+- qcom,no-suspend-delay : This decides system to go to suspend immediately
+or not
+
+- Refer to "Documentation/devicetree/bindings/arm/msm/msm_bus.txt" for
+below optional properties:
+    - qcom,msm-bus,name
+    - qcom,msm-bus,num-cases
+    - qcom,msm-bus,active-only
+    - qcom,msm-bus,num-paths
+    - qcom,msm-bus,vectors-KBps
+
+Aliases :
+An alias may be optionally used to bind the UART device to a TTY device
+(ttyHS<alias_num>) with a given alias number. Aliases are of the form
+uart<n> where <n> is an integer representing the alias number to use.
+On systems with multiple UART devices present, an alias may optionally be
+defined for such devices. The alias value should be from 0 to 255.
+
+Example:
+
+	aliases {
+		uart4 = &uart7; // This device will be enumerated as ttyHS4
+	};
+
+	uart7: uart@f995d000 {
+		compatible = "qcom,msm-hsuart-v14";
+		reg = <0x19c40000 0x1000>,
+		      <0xf9944000 0x5000>;
+		reg-names = "core_mem", "bam_mem";
+		interrupt-names = "core_irq", "bam_irq", "wakeup_irq";
+		#address-cells = <0>;
+		interrupt-parent = <&uart7>;
+		interrupts = <0 1 2>;
+		#interrupt-cells = <1>;
+		interrupt-map-mask = <0xffffffff>;
+		interrupt-map = <0 &intc 0 113 0
+				1 &intc 0 239 0
+				2 &msmgpio 42 0>;
+		qcom,tx-gpio = <&msmgpio 41 0x00>;
+		qcom,rx-gpio = <&msmgpio 42 0x00>;
+		qcom,cts-gpio = <&msmgpio 43 0x00>;
+		qcom,rfr-gpio = <&msmgpio 44 0x00>;
+		qcom,inject-rx-on-wakeup = <1>;
+		qcom,rx-char-to-inject = <0xFD>;
+
+		qcom,bam-tx-ep-pipe-index = <0>;
+		qcom,bam-rx-ep-pipe-index = <1>;
+
+		qcom,msm-bus,name = "uart7";
+		qcom,msm-bus,num-cases = <2>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps =
+				<84 512 0 0>,
+				<84 512 500 800>;
+	};
diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,msm-eud.txt b/Documentation/devicetree/bindings/soc/qcom/qcom,msm-eud.txt
index 800508a..45e309c 100644
--- a/Documentation/devicetree/bindings/soc/qcom/qcom,msm-eud.txt
+++ b/Documentation/devicetree/bindings/soc/qcom/qcom,msm-eud.txt
@@ -10,6 +10,10 @@
  - interrupts:  Interrupt number
  - reg: Should be address and size of EUD register space
  - reg-names: Should be "eud_base"
+ - clocks: a list of phandles to the PHY clocks. Use as per
+   Documentation/devicetree/bindings/clock/clock-bindings.txt
+ - clock-names: Names of the clocks in 1-1 correspondence with
+   the "clocks" property.
 
 Driver notifies clients via extcon for VBUS spoof attach/detach
 and charger enable/disable events. Clients registered for these
@@ -23,6 +27,8 @@
 		interrupts = <GIC_SPI 492 IRQ_TYPE_LEVEL_HIGH>;
 		reg = <0x88e0000 0x4000>;
 		reg-names = "eud_base";
+		clocks = <&clock_gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>;
+		clock-names = "cfg_ahb_clk";
 	};
 
 An example for EUD extcon client:
diff --git a/Documentation/devicetree/bindings/spmi/qcom,spmi-pmic-arb.txt b/Documentation/devicetree/bindings/spmi/qcom,spmi-pmic-arb.txt
index bef9193..c50d678 100644
--- a/Documentation/devicetree/bindings/spmi/qcom,spmi-pmic-arb.txt
+++ b/Documentation/devicetree/bindings/spmi/qcom,spmi-pmic-arb.txt
@@ -42,6 +42,11 @@
     cell 4: interrupt flags indicating level-sense information, as defined in
             dt-bindings/interrupt-controller/irq.h
 
+Optional properties:
+- qcom,enable-ahb-bus-workaround : Boolean flag which indicates that the AHB bus
+				   workaround sequence should be used for SPMI
+				   write transactions to avoid corruption
+
 Example V1 PMIC-Arbiter:
 
 	spmi {
diff --git a/arch/arm/configs/sdxpoorwills-perf_defconfig b/arch/arm/configs/sdxpoorwills-perf_defconfig
index 1826b6f..f57d9c3 100644
--- a/arch/arm/configs/sdxpoorwills-perf_defconfig
+++ b/arch/arm/configs/sdxpoorwills-perf_defconfig
@@ -195,6 +195,7 @@
 CONFIG_INPUT_GPIO=m
 CONFIG_SERIO_LIBPS2=y
 # CONFIG_LEGACY_PTYS is not set
+CONFIG_SERIAL_MSM_HS=y
 CONFIG_DIAG_CHAR=y
 CONFIG_HW_RANDOM=y
 CONFIG_I2C=y
@@ -286,7 +287,6 @@
 CONFIG_MSM_GLINK_SMEM_NATIVE_XPRT=y
 CONFIG_TRACER_PKT=y
 CONFIG_MSM_SMP2P=y
-CONFIG_MSM_SMP2P_TEST=y
 CONFIG_MSM_IPC_ROUTER_GLINK_XPRT=y
 CONFIG_MSM_QMI_INTERFACE=y
 CONFIG_MSM_GLINK_PKT=y
diff --git a/arch/arm/configs/sdxpoorwills_defconfig b/arch/arm/configs/sdxpoorwills_defconfig
index ce61464..ee823ff 100644
--- a/arch/arm/configs/sdxpoorwills_defconfig
+++ b/arch/arm/configs/sdxpoorwills_defconfig
@@ -189,6 +189,7 @@
 # CONFIG_LEGACY_PTYS is not set
 CONFIG_SERIAL_MSM=y
 CONFIG_SERIAL_MSM_CONSOLE=y
+CONFIG_SERIAL_MSM_HS=y
 CONFIG_DIAG_CHAR=y
 CONFIG_HW_RANDOM=y
 CONFIG_I2C=y
@@ -284,7 +285,6 @@
 CONFIG_MSM_GLINK_SMEM_NATIVE_XPRT=y
 CONFIG_TRACER_PKT=y
 CONFIG_MSM_SMP2P=y
-CONFIG_MSM_SMP2P_TEST=y
 CONFIG_MSM_IPC_ROUTER_GLINK_XPRT=y
 CONFIG_MSM_QMI_INTERFACE=y
 CONFIG_MSM_GLINK_PKT=y
diff --git a/arch/arm64/boot/dts/qcom/pmi8998.dtsi b/arch/arm64/boot/dts/qcom/pmi8998.dtsi
index a8b826a..1ee6d51 100644
--- a/arch/arm64/boot/dts/qcom/pmi8998.dtsi
+++ b/arch/arm64/boot/dts/qcom/pmi8998.dtsi
@@ -328,7 +328,8 @@
 				reg = <0x4400 0x100>;
 				interrupts = <0x2 0x44 0x0 IRQ_TYPE_EDGE_BOTH>,
 					     <0x2 0x44 0x1 IRQ_TYPE_EDGE_BOTH>,
-					     <0x2 0x44 0x2 IRQ_TYPE_EDGE_BOTH>;
+					     <0x2 0x44 0x2
+							IRQ_TYPE_EDGE_RISING>;
 				interrupt-names = "ima-rdy",
 						  "mem-xcp",
 						  "dma-grant";
diff --git a/arch/arm64/boot/dts/qcom/sdm670-gpu.dtsi b/arch/arm64/boot/dts/qcom/sdm670-gpu.dtsi
index 7718bca..89dee0c 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-gpu.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-gpu.dtsi
@@ -64,7 +64,7 @@
 
 		qcom,highest-bank-bit = <14>;
 
-		qcom,min-access-length = <64>;
+		qcom,min-access-length = <32>;
 
 		qcom,ubwc-mode = <2>;
 
diff --git a/arch/arm64/boot/dts/qcom/sdm670-rumi.dtsi b/arch/arm64/boot/dts/qcom/sdm670-rumi.dtsi
index 4ba16e4..a50d9b6 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-rumi.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-rumi.dtsi
@@ -188,3 +188,31 @@
 &usb_qmp_dp_phy {
 	status = "disabled";
 };
+
+&mdss_mdp {
+	status = "disabled";
+};
+
+&sde_rscc {
+	status = "disabled";
+};
+
+&mdss_rotator {
+	status = "disabled";
+};
+
+&mdss_dsi0 {
+	status = "disabled";
+};
+
+&mdss_dsi1 {
+	status = "disabled";
+};
+
+&mdss_dsi_phy0 {
+	status = "disabled";
+};
+
+&mdss_dsi_phy1 {
+	status = "disabled";
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm670.dtsi b/arch/arm64/boot/dts/qcom/sdm670.dtsi
index cb920d7..b527f3d 100644
--- a/arch/arm64/boot/dts/qcom/sdm670.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670.dtsi
@@ -42,6 +42,10 @@
 		hsuart0 = &qupv3_se6_4uart;
 	};
 
+	chosen {
+		bootargs = "rcupdate.rcu_expedited=1 core_ctl_disable_cpumask=6-7";
+	};
+
 	cpus {
 		#address-cells = <2>;
 		#size-cells = <0>;
@@ -608,6 +612,94 @@
 	ranges = <0 0 0 0xffffffff>;
 	compatible = "simple-bus";
 
+	jtag_mm0: jtagmm@7040000 {
+		compatible = "qcom,jtagv8-mm";
+		reg = <0x7040000 0x1000>;
+		reg-names = "etm-base";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "core_clk";
+
+		qcom,coresight-jtagmm-cpu = <&CPU0>;
+	};
+
+	jtag_mm1: jtagmm@7140000 {
+		compatible = "qcom,jtagv8-mm";
+		reg = <0x7140000 0x1000>;
+		reg-names = "etm-base";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "core_clk";
+
+		qcom,coresight-jtagmm-cpu = <&CPU1>;
+	};
+
+	jtag_mm2: jtagmm@7240000 {
+		compatible = "qcom,jtagv8-mm";
+		reg = <0x7240000 0x1000>;
+		reg-names = "etm-base";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "core_clk";
+
+		qcom,coresight-jtagmm-cpu = <&CPU2>;
+	};
+
+	jtag_mm3: jtagmm@7340000 {
+		compatible = "qcom,jtagv8-mm";
+		reg = <0x7340000 0x1000>;
+		reg-names = "etm-base";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "core_clk";
+
+		qcom,coresight-jtagmm-cpu = <&CPU3>;
+	};
+
+	jtag_mm4: jtagmm@7440000 {
+		compatible = "qcom,jtagv8-mm";
+		reg = <0x7440000 0x1000>;
+		reg-names = "etm-base";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "core_clk";
+
+		qcom,coresight-jtagmm-cpu = <&CPU4>;
+	};
+
+	jtag_mm5: jtagmm@7540000 {
+		compatible = "qcom,jtagv8-mm";
+		reg = <0x7540000 0x1000>;
+		reg-names = "etm-base";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "core_clk";
+
+		qcom,coresight-jtagmm-cpu = <&CPU5>;
+	};
+
+	jtag_mm6: jtagmm@7640000 {
+		compatible = "qcom,jtagv8-mm";
+		reg = <0x7640000 0x1000>;
+		reg-names = "etm-base";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "core_clk";
+
+		qcom,coresight-jtagmm-cpu = <&CPU6>;
+	};
+
+	jtag_mm7: jtagmm@7740000 {
+		compatible = "qcom,jtagv8-mm";
+		reg = <0x7740000 0x1000>;
+		reg-names = "etm-base";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "core_clk";
+
+		qcom,coresight-jtagmm-cpu = <&CPU7>;
+	};
+
 	intc: interrupt-controller@17a00000 {
 		compatible = "arm,gic-v3";
 		#interrupt-cells = <3>;
@@ -1974,8 +2066,11 @@
 		qcom,pm-qos-legacy-latency-us = <70 70>, <70 70>;
 
 		clocks = <&clock_gcc GCC_SDCC1_AHB_CLK>,
-			<&clock_gcc GCC_SDCC1_APPS_CLK>;
-		clock-names = "iface_clk", "core_clk";
+			<&clock_gcc GCC_SDCC1_APPS_CLK>,
+			<&clock_gcc GCC_SDCC1_ICE_CORE_CLK>;
+		clock-names = "iface_clk", "core_clk", "ice_core_clk";
+
+		qcom,ice-clk-rates = <300000000 75000000>;
 
 		qcom,nonremovable;
 
@@ -2151,32 +2246,7 @@
 		compatible = "qcom,devbw";
 		governor = "performance";
 		qcom,src-dst-ports =
-			<MSM_BUS_MASTER_AMPSS_M0 MSM_BUS_SLAVE_LLCC>;
-		qcom,active-only;
-		qcom,bw-tbl =
-			< MHZ_TO_MBPS(150, 16) >, /*  2288 MB/s */
-			< MHZ_TO_MBPS(300, 16) >, /*  4577 MB/s */
-			< MHZ_TO_MBPS(466, 16) >, /*  7110 MB/s */
-			< MHZ_TO_MBPS(600, 16) >, /*  9155 MB/s */
-			< MHZ_TO_MBPS(806, 16) >, /* 12298 MB/s */
-			< MHZ_TO_MBPS(933, 16) >; /* 14236 MB/s */
-	};
-
-	bwmon: qcom,cpu-bwmon {
-		compatible = "qcom,bimc-bwmon4";
-		reg = <0x1436400 0x300>, <0x1436300 0x200>;
-		reg-names = "base", "global_base";
-		interrupts = <0 581 4>;
-		qcom,mport = <0>;
-		qcom,hw-timer-hz = <19200000>;
-		qcom,target-dev = <&cpubw>;
-	};
-
-	llccbw: qcom,llccbw {
-		compatible = "qcom,devbw";
-		governor = "powersave";
-		qcom,src-dst-ports =
-			<MSM_BUS_MASTER_LLCC MSM_BUS_SLAVE_EBI_CH0>;
+			<MSM_BUS_MASTER_AMPSS_M0 MSM_BUS_SLAVE_EBI_CH0>;
 		qcom,active-only;
 		qcom,bw-tbl =
 			< MHZ_TO_MBPS( 100, 4) >, /* 381 MB/s */
@@ -2192,16 +2262,14 @@
 			< MHZ_TO_MBPS(1804, 4) >; /* 6881 MB/s */
 	};
 
-	llcc_bwmon: qcom,llcc-bwmon {
-		compatible = "qcom,bimc-bwmon5";
-		reg = <0x0114a000 0x1000>;
-		reg-names = "base";
-		interrupts = <GIC_SPI 580 IRQ_TYPE_LEVEL_HIGH>;
+	bwmon: qcom,cpu-bwmon {
+		compatible = "qcom,bimc-bwmon4";
+		reg = <0x1436400 0x300>, <0x1436300 0x200>;
+		reg-names = "base", "global_base";
+		interrupts = <0 581 4>;
+		qcom,mport = <0>;
 		qcom,hw-timer-hz = <19200000>;
-		qcom,target-dev = <&llccbw>;
-		qcom,count-unit = <0x200000>;
-		qcom,byte-mid-mask = <0xe000>;
-		qcom,byte-mid-match = <0xe000>;
+		qcom,target-dev = <&cpubw>;
 	};
 
 	memlat_cpu0: qcom,memlat-cpu0 {
@@ -2454,6 +2522,38 @@
 	status = "ok";
 };
 
+&mdss_dsi0 {
+	qcom,core-supply-entries {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		qcom,core-supply-entry@0 {
+			reg = <0>;
+			qcom,supply-name = "refgen";
+			qcom,supply-min-voltage = <0>;
+			qcom,supply-max-voltage = <0>;
+			qcom,supply-enable-load = <0>;
+			qcom,supply-disable-load = <0>;
+		};
+	};
+};
+
+&mdss_dsi1 {
+	qcom,core-supply-entries {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		qcom,core-supply-entry@0 {
+			reg = <0>;
+			qcom,supply-name = "refgen";
+			qcom,supply-min-voltage = <0>;
+			qcom,supply-max-voltage = <0>;
+			qcom,supply-enable-load = <0>;
+			qcom,supply-disable-load = <0>;
+		};
+	};
+};
+
 #include "sdm670-audio.dtsi"
 #include "sdm670-usb.dtsi"
 #include "sdm670-gpu.dtsi"
diff --git a/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-cdp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-cdp.dtsi
index 9a1f055..04418d4 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-cdp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-cdp.dtsi
@@ -376,4 +376,47 @@
 		clock-cntl-level = "turbo";
 		clock-rates = <24000000>;
 	};
+
+	qcom,cam-sensor@3 {
+		cell-index = <3>;
+		compatible = "qcom,cam-sensor";
+		reg = <0x03>;
+		csiphy-sd-index = <3>;
+		sensor-position-roll = <270>;
+		sensor-position-pitch = <0>;
+		sensor-position-yaw = <0>;
+		cam_vio-supply = <&pm8998_lvs1>;
+		cam_vana-supply = <&pmi8998_bob>;
+		cam_vdig-supply = <&camera_ldo>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+			"cam_clk";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <0 3312000 1050000 0>;
+		rgltr-max-voltage = <0 3600000 1050000 0>;
+		rgltr-load-current = <0 80000 105000 0>;
+		gpio-no-mux = <0>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk3_active
+				 &cam_sensor_iris_active>;
+		pinctrl-1 = <&cam_sensor_mclk3_suspend
+				 &cam_sensor_iris_suspend>;
+		gpios = <&tlmm 16 0>,
+			<&tlmm 9 0>,
+			<&tlmm 8 0>;
+		gpio-reset = <1>;
+		gpio-vana = <2>;
+		gpio-req-tbl-num = <0 1 2>;
+		gpio-req-tbl-flags = <1 0 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK3",
+					"CAM_RESET3",
+					"CAM_VANA1";
+		sensor-mode = <0>;
+		cci-master = <1>;
+		status = "ok";
+		clocks = <&clock_camcc CAM_CC_MCLK3_CLK>;
+		clock-names = "cam_clk";
+		clock-cntl-level = "turbo";
+		clock-rates = <24000000>;
+	};
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-mtp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-mtp.dtsi
index 2702ca1..9088fac 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-mtp.dtsi
@@ -376,4 +376,47 @@
 		clock-cntl-level = "turbo";
 		clock-rates = <24000000>;
 	};
+	qcom,cam-sensor@3 {
+		cell-index = <3>;
+		compatible = "qcom,cam-sensor";
+		reg = <0x03>;
+		csiphy-sd-index = <3>;
+		sensor-position-roll = <270>;
+		sensor-position-pitch = <0>;
+		sensor-position-yaw = <0>;
+		cam_vio-supply = <&pm8998_lvs1>;
+		cam_vana-supply = <&pmi8998_bob>;
+		cam_vdig-supply = <&camera_ldo>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+			"cam_clk";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <0 3312000 1050000 0>;
+		rgltr-max-voltage = <0 3600000 1050000 0>;
+		rgltr-load-current = <0 80000 105000 0>;
+		gpio-no-mux = <0>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk3_active
+				 &cam_sensor_iris_active>;
+		pinctrl-1 = <&cam_sensor_mclk3_suspend
+				 &cam_sensor_iris_suspend>;
+		gpios = <&tlmm 16 0>,
+			<&tlmm 9 0>,
+			<&tlmm 8 0>;
+		gpio-reset = <1>;
+		gpio-vana = <2>;
+		gpio-req-tbl-num = <0 1 2>;
+		gpio-req-tbl-flags = <1 0 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK3",
+					"CAM_RESET3",
+					"CAM_VANA1";
+		sensor-mode = <0>;
+		cci-master = <1>;
+		status = "ok";
+		clocks = <&clock_camcc CAM_CC_MCLK3_CLK>;
+		clock-names = "cam_clk";
+		clock-cntl-level = "turbo";
+		clock-rates = <24000000>;
+	};
+
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi b/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi
index e4f768f..7c9482c 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi
@@ -817,7 +817,6 @@
 			"soc_ahb_clk",
 			"cpas_ahb_clk",
 			"camnoc_axi_clk",
-			"icp_apb_clk",
 			"icp_clk",
 			"icp_clk_src";
 		clocks = <&clock_gcc GCC_CAMERA_AHB_CLK>,
@@ -826,11 +825,10 @@
 				<&clock_camcc CAM_CC_SOC_AHB_CLK>,
 				<&clock_camcc CAM_CC_CPAS_AHB_CLK>,
 				<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
-				<&clock_camcc CAM_CC_ICP_APB_CLK>,
 				<&clock_camcc CAM_CC_ICP_CLK>,
 				<&clock_camcc CAM_CC_ICP_CLK_SRC>;
 
-		clock-rates = <0 0 400000000 0 0 0 0 0 600000000>;
+		clock-rates = <0 0 400000000 0 0 0 0 600000000>;
 		clock-cntl-level = "turbo";
 		fw_name = "CAMERA_ICP.elf";
 		status = "ok";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-gpu.dtsi b/arch/arm64/boot/dts/qcom/sdm845-gpu.dtsi
index f800b4e..11b6a4d 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-gpu.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-gpu.dtsi
@@ -329,9 +329,10 @@
 
 			compatible = "qcom,gmu-pwrlevels";
 
+			/* GMU power levels must go from lowest to highest */
 			qcom,gmu-pwrlevel@0 {
 				reg = <0>;
-				qcom,gmu-freq = <400000000>;
+				qcom,gmu-freq = <0>;
 			};
 
 			qcom,gmu-pwrlevel@1 {
@@ -341,7 +342,7 @@
 
 			qcom,gmu-pwrlevel@2 {
 				reg = <2>;
-				qcom,gmu-freq = <0>;
+				qcom,gmu-freq = <400000000>;
 			};
 		};
 
diff --git a/arch/arm64/boot/dts/qcom/sdm845-interposer-pm660.dtsi b/arch/arm64/boot/dts/qcom/sdm845-interposer-pm660.dtsi
index 73cd794..cc609aa 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-interposer-pm660.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-interposer-pm660.dtsi
@@ -183,6 +183,12 @@
 		/delete-property/ cam_vio-supply;
 		/delete-property/ cam_vana-supply;
 	};
+
+	qcom,cam-sensor@3 {
+		/delete-property/ cam_vio-supply;
+		/delete-property/ cam_vana-supply;
+	};
+
 };
 
 &clock_gcc {
diff --git a/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi
index be76635..a0207e5 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi
@@ -210,7 +210,7 @@
 				config {
 					pins = "gpio37";
 					drive-strength = <2>;
-					bias-pull-down;
+					bias-pull-up;
 				};
 			};
 		};
@@ -2826,6 +2826,35 @@
 			};
 		};
 
+		cam_sensor_mclk3_active: cam_sensor_mclk3_active {
+			/* MCLK3 */
+			mux {
+				pins = "gpio16";
+				function = "cam_mclk";
+			};
+
+			config {
+				pins = "gpio16";
+				bias-disable; /* No PULL */
+				drive-strength = <2>; /* 2 MA */
+			};
+		};
+
+		cam_sensor_mclk3_suspend: cam_sensor_mclk3_suspend {
+			/* MCLK3 */
+			mux {
+				pins = "gpio16";
+				function = "cam_mclk";
+			};
+
+			config {
+				pins = "gpio16";
+				bias-pull-down; /* PULL DOWN */
+				drive-strength = <2>; /* 2 MA */
+			};
+		};
+
+
 		cam_sensor_front_active: cam_sensor_front_active {
 			/* RESET  AVDD_LDO*/
 			mux {
@@ -2855,6 +2884,36 @@
 			};
 		};
 
+		cam_sensor_iris_active: cam_sensor_iris_active {
+			/* RESET  AVDD_LDO*/
+			mux {
+				pins = "gpio9", "gpio8";
+				function = "gpio";
+			};
+
+			config {
+				pins = "gpio9", "gpio8";
+				bias-disable; /* No PULL */
+				drive-strength = <2>; /* 2 MA */
+			};
+		};
+
+		cam_sensor_iris_suspend: cam_sensor_iris_suspend {
+			/* RESET */
+			mux {
+				pins = "gpio9";
+				function = "gpio";
+			};
+
+			config {
+				pins = "gpio9";
+				bias-disable; /* No PULL */
+				drive-strength = <2>; /* 2 MA */
+				output-low;
+			};
+		};
+
+
 		cam_sensor_mclk2_active: cam_sensor_mclk2_active {
 			/* MCLK1 */
 			mux {
diff --git a/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi b/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
index 0494495..e7a946c 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
@@ -211,6 +211,7 @@
 			qcom,sde-dspp-gamut = <0x1000 0x00040000>;
 			qcom,sde-dspp-pcc = <0x1700 0x00040000>;
 			qcom,sde-dspp-gc = <0x17c0 0x00010008>;
+			qcom,sde-dspp-hist = <0x800 0x00010007>;
 		};
 
 		qcom,platform-supply-entries {
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2-camera.dtsi b/arch/arm64/boot/dts/qcom/sdm845-v2-camera.dtsi
index c070ed6..0b6e27e 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-v2-camera.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-v2-camera.dtsi
@@ -24,8 +24,7 @@
 		gdscr-supply = <&titan_top_gdsc>;
 		refgen-supply = <&refgen>;
 		csi-vdd-voltage = <1200000>;
-		mipi-csi-vdd-supply = <&pm8998_l26>;
-		mipi-dsi-vdd-supply = <&pm8998_l1>;
+		mipi-csi-vdd-supply = <&pm8998_l1>;
 		clocks = <&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
 			<&clock_camcc CAM_CC_SOC_AHB_CLK>,
 			<&clock_camcc CAM_CC_SLOW_AHB_CLK_SRC>,
@@ -60,8 +59,7 @@
 		gdscr-supply = <&titan_top_gdsc>;
 		refgen-supply = <&refgen>;
 		csi-vdd-voltage = <1200000>;
-		mipi-csi-vdd-supply = <&pm8998_l26>;
-		mipi-dsi-vdd-supply = <&pm8998_l1>;
+		mipi-csi-vdd-supply = <&pm8998_l1>;
 		clocks = <&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
 			<&clock_camcc CAM_CC_SOC_AHB_CLK>,
 			<&clock_camcc CAM_CC_SLOW_AHB_CLK_SRC>,
@@ -97,8 +95,7 @@
 		gdscr-supply = <&titan_top_gdsc>;
 		refgen-supply = <&refgen>;
 		csi-vdd-voltage = <1200000>;
-		mipi-csi-vdd-supply = <&pm8998_l26>;
-		mipi-dsi-vdd-supply = <&pm8998_l1>;
+		mipi-csi-vdd-supply = <&pm8998_l1>;
 		clocks = <&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
 			<&clock_camcc CAM_CC_SOC_AHB_CLK>,
 			<&clock_camcc CAM_CC_SLOW_AHB_CLK_SRC>,
@@ -124,7 +121,7 @@
 	cam_csiphy3: qcom,csiphy@ac68000 {
 		cell-index = <3>;
 		compatible = "qcom,csiphy-v1.0", "qcom,csiphy";
-		reg = <0xac67000 0x1000>;
+		reg = <0xac68000 0x1000>;
 		reg-names = "csiphy";
 		reg-cam-base = <0x68000>;
 		interrupts = <0 448 0>;
@@ -133,8 +130,7 @@
 		gdscr-supply = <&titan_top_gdsc>;
 		refgen-supply = <&refgen>;
 		csi-vdd-voltage = <1200000>;
-		mipi-csi-vdd-supply = <&pm8998_l26>;
-		mipi-dsi-vdd-supply = <&pm8998_l1>;
+		mipi-csi-vdd-supply = <&pm8998_l1>;
 		clocks = <&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
 			<&clock_camcc CAM_CC_SOC_AHB_CLK>,
 			<&clock_camcc CAM_CC_SLOW_AHB_CLK_SRC>,
@@ -319,14 +315,14 @@
 			"turbo", "turbo";
 		client-id-based;
 		client-names =
-			"csiphy0", "csiphy1", "csiphy2", "cci0",
+			"csiphy0", "csiphy1", "csiphy2", "csiphy3", "cci0",
 			"csid0", "csid1", "csid2",
 			"ife0", "ife1", "ife2", "ipe0",
 			"ipe1", "cam-cdm-intf0", "cpas-cdm0", "bps0",
 			"icp0", "jpeg-dma0", "jpeg-enc0", "fd0";
 		client-axi-port-names =
-			"cam_hf_1", "cam_hf_2", "cam_hf_2", "cam_sf_1",
-			"cam_hf_1", "cam_hf_2", "cam_hf_2",
+			"cam_hf_1", "cam_hf_2", "cam_hf_2", "cam_hf_2",
+			"cam_sf_1", "cam_hf_1", "cam_hf_2", "cam_hf_2",
 			"cam_hf_1", "cam_hf_2", "cam_hf_2", "cam_sf_1",
 			"cam_sf_1", "cam_sf_1", "cam_sf_1", "cam_sf_1",
 			"cam_sf_1", "cam_sf_1", "cam_sf_1", "cam_sf_1";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2.dtsi b/arch/arm64/boot/dts/qcom/sdm845-v2.dtsi
index 138b750..b3cb8f5 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-v2.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-v2.dtsi
@@ -25,6 +25,10 @@
 /delete-node/ &apc0_cpr;
 /delete-node/ &apc1_cpr;
 
+&tlmm {
+	compatible = "qcom,sdm845-pinctrl-v2";
+};
+
 &soc {
 	/* CPR controller regulators */
 	apc0_cpr: cprh-ctrl@17dc0000 {
@@ -1168,6 +1172,7 @@
 
 &clock_camcc {
 	compatible = "qcom,cam_cc-sdm845-v2", "syscon";
+	qcom,cam_cc_csi3phytimer_clk_src-opp-handle = <&cam_csiphy3>;
 };
 
 &clock_dispcc {
@@ -1193,7 +1198,6 @@
 
 &refgen {
 	status = "ok";
-	regulator-always-on;
 };
 
 &spss_utils {
@@ -1394,7 +1398,7 @@
 &msm_gpu {
 	/* Updated chip ID */
 	qcom,chipid = <0x06030001>;
-	qcom,initial-pwrlevel = <5>;
+	qcom,initial-pwrlevel = <6>;
 
 	qcom,gpu-pwrlevels {
 		#address-cells = <1>;
@@ -1404,54 +1408,62 @@
 
 		qcom,gpu-pwrlevel@0 {
 			reg = <0>;
+			qcom,gpu-freq = <710000000>;
+			qcom,bus-freq = <12>;
+			qcom,bus-min = <12>;
+			qcom,bus-max = <12>;
+		};
+
+		qcom,gpu-pwrlevel@1 {
+			reg = <1>;
 			qcom,gpu-freq = <675000000>;
 			qcom,bus-freq = <12>;
 			qcom,bus-min = <10>;
 			qcom,bus-max = <12>;
 		};
 
-		qcom,gpu-pwrlevel@1 {
-			reg = <1>;
+		qcom,gpu-pwrlevel@2 {
+			reg = <2>;
 			qcom,gpu-freq = <596000000>;
 			qcom,bus-freq = <10>;
 			qcom,bus-min = <9>;
-			qcom,bus-max = <11>;
+			qcom,bus-max = <12>;
 		};
 
-		qcom,gpu-pwrlevel@2 {
-			reg = <2>;
+		qcom,gpu-pwrlevel@3 {
+			reg = <3>;
 			qcom,gpu-freq = <520000000>;
 			qcom,bus-freq = <9>;
 			qcom,bus-min = <8>;
 			qcom,bus-max = <10>;
 		};
 
-		qcom,gpu-pwrlevel@3 {
-			reg = <3>;
+		qcom,gpu-pwrlevel@4 {
+			reg = <4>;
 			qcom,gpu-freq = <414000000>;
 			qcom,bus-freq = <8>;
 			qcom,bus-min = <7>;
 			qcom,bus-max = <9>;
 		};
 
-		qcom,gpu-pwrlevel@4 {
-			reg = <4>;
+		qcom,gpu-pwrlevel@5 {
+			reg = <5>;
 			qcom,gpu-freq = <342000000>;
 			qcom,bus-freq = <6>;
 			qcom,bus-min = <5>;
 			qcom,bus-max = <7>;
 		};
 
-		qcom,gpu-pwrlevel@5 {
-			reg = <5>;
+		qcom,gpu-pwrlevel@6 {
+			reg = <6>;
 			qcom,gpu-freq = <257000000>;
 			qcom,bus-freq = <4>;
 			qcom,bus-min = <3>;
 			qcom,bus-max = <5>;
 		};
 
-		qcom,gpu-pwrlevel@6 {
-			reg = <6>;
+		qcom,gpu-pwrlevel@7 {
+			reg = <7>;
 			qcom,gpu-freq = <0>;
 			qcom,bus-freq = <0>;
 			qcom,bus-min = <0>;
@@ -1467,9 +1479,10 @@
 
 		compatible = "qcom,gmu-pwrlevels";
 
+		/* GMU power levels must go from lowest to highest */
 		qcom,gmu-pwrlevel@0 {
 			reg = <0>;
-			qcom,gmu-freq = <500000000>;
+			qcom,gmu-freq = <0>;
 		};
 
 		qcom,gmu-pwrlevel@1 {
@@ -1479,7 +1492,7 @@
 
 		qcom,gmu-pwrlevel@2 {
 			reg = <2>;
-			qcom,gmu-freq = <0>;
+			qcom,gmu-freq = <500000000>;
 		};
 	};
 };
@@ -1496,7 +1509,7 @@
 			    0x40 0x194 /* PLL_BIAS_CONTROL_1 */
 			    0x20 0x198 /* PLL_BIAS_CONTROL_2 */
 			    0x21 0x214 /* PWR_CTRL2 */
-			    0x00 0x220 /* IMP_CTRL1 */
+			    0x07 0x220 /* IMP_CTRL1 */
 			    0x58 0x224 /* IMP_CTRL2 */
 			    0x45 0x240 /* TUNE1 */
 			    0x29 0x244 /* TUNE2 */
diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
index 077770d..03117b1 100644
--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
@@ -79,7 +79,7 @@
 			};
 			L1_I_0: l1-icache {
 				compatible = "arm,arch-cache";
-				qcom,dump-size = <0xa000>;
+				qcom,dump-size = <0x12000>;
 			};
 			L1_D_0: l1-dcache {
 				compatible = "arm,arch-cache";
@@ -110,7 +110,7 @@
 			};
 			L1_I_100: l1-icache {
 				compatible = "arm,arch-cache";
-				qcom,dump-size = <0xa000>;
+				qcom,dump-size = <0x12000>;
 			};
 			L1_D_100: l1-dcache {
 				compatible = "arm,arch-cache";
@@ -141,7 +141,7 @@
 			};
 			L1_I_200: l1-icache {
 				compatible = "arm,arch-cache";
-				qcom,dump-size = <0xa000>;
+				qcom,dump-size = <0x12000>;
 			};
 			L1_D_200: l1-dcache {
 				compatible = "arm,arch-cache";
@@ -172,7 +172,7 @@
 			};
 			L1_I_300: l1-icache {
 				compatible = "arm,arch-cache";
-				qcom,dump-size = <0xa000>;
+				qcom,dump-size = <0x12000>;
 			};
 			L1_D_300: l1-dcache {
 				compatible = "arm,arch-cache";
@@ -203,7 +203,7 @@
 			};
 			L1_I_400: l1-icache {
 				compatible = "arm,arch-cache";
-				qcom,dump-size = <0x14000>;
+				qcom,dump-size = <0x24000>;
 			};
 			L1_D_400: l1-dcache {
 				compatible = "arm,arch-cache";
@@ -234,7 +234,7 @@
 			};
 			L1_I_500: l1-icache {
 				compatible = "arm,arch-cache";
-				qcom,dump-size = <0x14000>;
+				qcom,dump-size = <0x24000>;
 			};
 			L1_D_500: l1-dcache {
 				compatible = "arm,arch-cache";
@@ -265,7 +265,7 @@
 			};
 			L1_I_600: l1-icache {
 				compatible = "arm,arch-cache";
-				qcom,dump-size = <0x14000>;
+				qcom,dump-size = <0x24000>;
 			};
 			L1_D_600: l1-dcache {
 				compatible = "arm,arch-cache";
@@ -296,7 +296,7 @@
 			};
 			L1_I_700: l1-icache {
 				compatible = "arm,arch-cache";
-				qcom,dump-size = <0x14000>;
+				qcom,dump-size = <0x24000>;
 			};
 			L1_D_700: l1-dcache {
 				compatible = "arm,arch-cache";
@@ -648,6 +648,94 @@
 	ranges = <0 0 0 0xffffffff>;
 	compatible = "simple-bus";
 
+	jtag_mm0: jtagmm@7040000 {
+		compatible = "qcom,jtagv8-mm";
+		reg = <0x7040000 0x1000>;
+		reg-names = "etm-base";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "core_clk";
+
+		qcom,coresight-jtagmm-cpu = <&CPU0>;
+	};
+
+	jtag_mm1: jtagmm@7140000 {
+		compatible = "qcom,jtagv8-mm";
+		reg = <0x7140000 0x1000>;
+		reg-names = "etm-base";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "core_clk";
+
+		qcom,coresight-jtagmm-cpu = <&CPU1>;
+	};
+
+	jtag_mm2: jtagmm@7240000 {
+		compatible = "qcom,jtagv8-mm";
+		reg = <0x7240000 0x1000>;
+		reg-names = "etm-base";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "core_clk";
+
+		qcom,coresight-jtagmm-cpu = <&CPU2>;
+	};
+
+	jtag_mm3: jtagmm@7340000 {
+		compatible = "qcom,jtagv8-mm";
+		reg = <0x7340000 0x1000>;
+		reg-names = "etm-base";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "core_clk";
+
+		qcom,coresight-jtagmm-cpu = <&CPU3>;
+	};
+
+	jtag_mm4: jtagmm@7440000 {
+		compatible = "qcom,jtagv8-mm";
+		reg = <0x7440000 0x1000>;
+		reg-names = "etm-base";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "core_clk";
+
+		qcom,coresight-jtagmm-cpu = <&CPU4>;
+	};
+
+	jtag_mm5: jtagmm@7540000 {
+		compatible = "qcom,jtagv8-mm";
+		reg = <0x7540000 0x1000>;
+		reg-names = "etm-base";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "core_clk";
+
+		qcom,coresight-jtagmm-cpu = <&CPU5>;
+	};
+
+	jtag_mm6: jtagmm@7640000 {
+		compatible = "qcom,jtagv8-mm";
+		reg = <0x7640000 0x1000>;
+		reg-names = "etm-base";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "core_clk";
+
+		qcom,coresight-jtagmm-cpu = <&CPU6>;
+	};
+
+	jtag_mm7: jtagmm@7740000 {
+		compatible = "qcom,jtagv8-mm";
+		reg = <0x7740000 0x1000>;
+		reg-names = "etm-base";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "core_clk";
+
+		qcom,coresight-jtagmm-cpu = <&CPU7>;
+	};
+
 	intc: interrupt-controller@17a00000 {
 		compatible = "arm,gic-v3";
 		#interrupt-cells = <3>;
@@ -758,6 +846,7 @@
 		interrupt-controller;
 		#interrupt-cells = <4>;
 		cell-index = <0>;
+		qcom,enable-ahb-bus-workaround;
 	};
 
 	spmi_debug_bus: qcom,spmi-debug@6b22000 {
@@ -1793,6 +1882,8 @@
 		interrupts = <GIC_SPI 492 IRQ_TYPE_LEVEL_HIGH>;
 		reg = <0x88e0000 0x2000>;
 		reg-names = "eud_base";
+		clocks = <&clock_gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>;
+		clock-names = "cfg_ahb_clk";
 		status = "ok";
 	};
 
@@ -2190,6 +2281,10 @@
 			max-slices = <32>;
 		};
 
+		qcom,llcc-perfmon {
+			compatible = "qcom,llcc-perfmon";
+		};
+
 		qcom,llcc-erp {
 			compatible = "qcom,llcc-erp";
 			interrupt-names = "ecc_irq";
@@ -2628,7 +2723,7 @@
 			<0x1dc4000 0x24000>;
 		reg-names = "crypto-base","crypto-bam-base";
 		interrupts = <0 272 0>;
-		qcom,bam-pipe-pair = <1>;
+		qcom,bam-pipe-pair = <3>;
 		qcom,ce-hw-instance = <0>;
 		qcom,ce-device = <0>;
 		qcom,ce-hw-shared;
@@ -2647,9 +2742,9 @@
 			 <&clock_gcc GCC_CE1_AXI_CLK>;
 		qcom,ce-opp-freq = <171430000>;
 		qcom,request-bw-before-clk;
-		qcom,smmu-s1-bypass;
-		iommus = <&apps_smmu 0x702 0x1>,
-			 <&apps_smmu 0x712 0x1>;
+		qcom,smmu-s1-enable;
+		iommus = <&apps_smmu 0x706 0x1>,
+			 <&apps_smmu 0x716 0x1>;
 	};
 
 	qcom_msmhdcp: qcom,msm_hdcp {
@@ -2688,9 +2783,9 @@
 		qcom,use-sw-ahash-algo;
 		qcom,use-sw-aead-algo;
 		qcom,use-sw-hmac-algo;
-		qcom,smmu-s1-bypass;
-		iommus = <&apps_smmu 0x704 0x3>,
-			 <&apps_smmu 0x714 0x3>;
+		qcom,smmu-s1-enable;
+		iommus = <&apps_smmu 0x704 0x1>,
+			 <&apps_smmu 0x714 0x1>;
 	};
 
 	qcom,msm_gsi {
diff --git a/arch/arm64/configs/msm8953-perf_defconfig b/arch/arm64/configs/msm8953-perf_defconfig
index 4e7fe31..6b83b36 100644
--- a/arch/arm64/configs/msm8953-perf_defconfig
+++ b/arch/arm64/configs/msm8953-perf_defconfig
@@ -405,7 +405,6 @@
 CONFIG_MSM_GLINK_SMEM_NATIVE_XPRT=y
 CONFIG_MSM_GLINK_SPI_XPRT=y
 CONFIG_MSM_SMP2P=y
-CONFIG_MSM_SMP2P_TEST=y
 CONFIG_MSM_IPC_ROUTER_GLINK_XPRT=y
 CONFIG_MSM_QMI_INTERFACE=y
 CONFIG_MSM_GLINK_PKT=y
diff --git a/arch/arm64/configs/msm8953_defconfig b/arch/arm64/configs/msm8953_defconfig
index 6951086..791d349 100644
--- a/arch/arm64/configs/msm8953_defconfig
+++ b/arch/arm64/configs/msm8953_defconfig
@@ -422,7 +422,6 @@
 CONFIG_MSM_GLINK_SPI_XPRT=y
 CONFIG_TRACER_PKT=y
 CONFIG_MSM_SMP2P=y
-CONFIG_MSM_SMP2P_TEST=y
 CONFIG_MSM_IPC_ROUTER_GLINK_XPRT=y
 CONFIG_MSM_QMI_INTERFACE=y
 CONFIG_MSM_GLINK_PKT=y
diff --git a/arch/arm64/configs/sdm670-perf_defconfig b/arch/arm64/configs/sdm670-perf_defconfig
index cc7f169..901143b 100644
--- a/arch/arm64/configs/sdm670-perf_defconfig
+++ b/arch/arm64/configs/sdm670-perf_defconfig
@@ -7,6 +7,9 @@
 CONFIG_HIGH_RES_TIMERS=y
 CONFIG_IRQ_TIME_ACCOUNTING=y
 CONFIG_SCHED_WALT=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
 CONFIG_RCU_EXPERT=y
 CONFIG_RCU_FAST_NO_HZ=y
 CONFIG_RCU_NOCB_CPU=y
@@ -127,6 +130,7 @@
 CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
 CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
 CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_HARDIDLETIMER=y
 CONFIG_NETFILTER_XT_TARGET_LOG=y
 CONFIG_NETFILTER_XT_TARGET_MARK=y
 CONFIG_NETFILTER_XT_TARGET_NFLOG=y
@@ -202,6 +206,8 @@
 CONFIG_NET_SCH_INGRESS=y
 CONFIG_NET_CLS_FW=y
 CONFIG_NET_CLS_U32=y
+CONFIG_CLS_U32_MARK=y
+CONFIG_NET_CLS_FLOW=y
 CONFIG_NET_EMATCH=y
 CONFIG_NET_EMATCH_CMP=y
 CONFIG_NET_EMATCH_NBYTE=y
@@ -230,6 +236,7 @@
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=8192
 CONFIG_QSEECOM=y
+CONFIG_UID_SYS_STATS=y
 CONFIG_MEMORY_STATE_TIME=y
 CONFIG_QPNP_MISC=y
 CONFIG_SCSI=y
@@ -261,9 +268,15 @@
 CONFIG_PPP=y
 CONFIG_PPP_BSDCOMP=y
 CONFIG_PPP_DEFLATE=y
+CONFIG_PPP_FILTER=y
 CONFIG_PPP_MPPE=y
+CONFIG_PPP_MULTILINK=y
+CONFIG_PPPOE=y
+CONFIG_PPPOL2TP=y
 CONFIG_PPPOLAC=y
 CONFIG_PPPOPNS=y
+CONFIG_PPP_ASYNC=y
+CONFIG_PPP_SYNC_TTY=y
 CONFIG_USB_USBNET=y
 CONFIG_WIL6210=m
 # CONFIG_WIL6210_TRACING is not set
@@ -500,24 +513,27 @@
 CONFIG_TRACER_PKT=y
 CONFIG_QTI_RPMH_API=y
 CONFIG_MSM_SMP2P=y
-CONFIG_MSM_SMP2P_TEST=y
 CONFIG_MSM_IPC_ROUTER_GLINK_XPRT=y
 CONFIG_MSM_QMI_INTERFACE=y
 CONFIG_MSM_GLINK_PKT=y
 CONFIG_MSM_SUBSYSTEM_RESTART=y
 CONFIG_MSM_PIL=y
+CONFIG_MSM_SYSMON_GLINK_COMM=y
 CONFIG_MSM_PIL_SSR_GENERIC=y
 CONFIG_MSM_PIL_MSS_QDSP6V5=y
 CONFIG_ICNSS=y
 CONFIG_QCOM_COMMAND_DB=y
 CONFIG_MSM_PERFORMANCE=y
 CONFIG_MSM_CDSP_LOADER=y
+CONFIG_QCOM_SMCINVOKE=y
 CONFIG_MSM_EVENT_TIMER=y
 CONFIG_MSM_PM=y
 CONFIG_MSM_QBT1000=y
 CONFIG_APSS_CORE_EA=y
 CONFIG_QTI_RPM_STATS_LOG=y
 CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
+CONFIG_QMP_DEBUGFS_CLIENT=y
+CONFIG_MSM_REMOTEQDSS=y
 CONFIG_QCOM_BIMC_BWMON=y
 CONFIG_ARM_MEMLAT_MON=y
 CONFIG_QCOMCCI_HWMON=y
@@ -568,13 +584,16 @@
 CONFIG_CORESIGHT_TPDA=y
 CONFIG_CORESIGHT_TPDM=y
 CONFIG_CORESIGHT_CTI=y
+CONFIG_CORESIGHT_EVENT=y
 CONFIG_CORESIGHT_HWEVENT=y
 CONFIG_CORESIGHT_DUMMY=y
 CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
 CONFIG_SECURITY=y
 CONFIG_HARDENED_USERCOPY=y
+CONFIG_FORTIFY_SOURCE=y
 CONFIG_SECURITY_SELINUX=y
 CONFIG_SECURITY_SMACK=y
+CONFIG_CRYPTO_CTR=y
 CONFIG_CRYPTO_XCBC=y
 CONFIG_CRYPTO_MD4=y
 CONFIG_CRYPTO_TWOFISH=y
diff --git a/arch/arm64/configs/sdm670_defconfig b/arch/arm64/configs/sdm670_defconfig
index b47850c..1e9c191 100644
--- a/arch/arm64/configs/sdm670_defconfig
+++ b/arch/arm64/configs/sdm670_defconfig
@@ -519,7 +519,6 @@
 CONFIG_TRACER_PKT=y
 CONFIG_QTI_RPMH_API=y
 CONFIG_MSM_SMP2P=y
-CONFIG_MSM_SMP2P_TEST=y
 CONFIG_MSM_IPC_ROUTER_GLINK_XPRT=y
 CONFIG_MSM_QMI_INTERFACE=y
 CONFIG_MSM_GLINK_PKT=y
diff --git a/arch/arm64/configs/sdm845-perf_defconfig b/arch/arm64/configs/sdm845-perf_defconfig
index fd96708..9ee3277 100644
--- a/arch/arm64/configs/sdm845-perf_defconfig
+++ b/arch/arm64/configs/sdm845-perf_defconfig
@@ -7,9 +7,6 @@
 CONFIG_HIGH_RES_TIMERS=y
 CONFIG_IRQ_TIME_ACCOUNTING=y
 CONFIG_SCHED_WALT=y
-CONFIG_TASKSTATS=y
-CONFIG_TASK_XACCT=y
-CONFIG_TASK_IO_ACCOUNTING=y
 CONFIG_RCU_EXPERT=y
 CONFIG_RCU_FAST_NO_HZ=y
 CONFIG_RCU_NOCB_CPU=y
@@ -24,6 +21,7 @@
 CONFIG_RT_GROUP_SCHED=y
 CONFIG_CGROUP_BPF=y
 CONFIG_SCHED_CORE_CTL=y
+CONFIG_SCHED_CORE_ROTATE=y
 CONFIG_NAMESPACES=y
 # CONFIG_UTS_NS is not set
 # CONFIG_PID_NS is not set
@@ -237,7 +235,6 @@
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=8192
 CONFIG_QSEECOM=y
-CONFIG_UID_SYS_STATS=y
 CONFIG_MEMORY_STATE_TIME=y
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
@@ -492,6 +489,7 @@
 CONFIG_QCOM_RUN_QUEUE_STATS=y
 CONFIG_QCOM_LLCC=y
 CONFIG_QCOM_SDM845_LLCC=y
+CONFIG_QCOM_LLCC_PERFMON=m
 CONFIG_MSM_SERVICE_LOCATOR=y
 CONFIG_MSM_SERVICE_NOTIFIER=y
 CONFIG_MSM_BOOT_STATS=y
@@ -512,7 +510,6 @@
 CONFIG_TRACER_PKT=y
 CONFIG_QTI_RPMH_API=y
 CONFIG_MSM_SMP2P=y
-CONFIG_MSM_SMP2P_TEST=y
 CONFIG_MSM_IPC_ROUTER_GLINK_XPRT=y
 CONFIG_MSM_QMI_INTERFACE=y
 CONFIG_MSM_GLINK_PKT=y
diff --git a/arch/arm64/configs/sdm845_defconfig b/arch/arm64/configs/sdm845_defconfig
index 0d89dc7..f251367 100644
--- a/arch/arm64/configs/sdm845_defconfig
+++ b/arch/arm64/configs/sdm845_defconfig
@@ -25,6 +25,7 @@
 CONFIG_RT_GROUP_SCHED=y
 CONFIG_CGROUP_BPF=y
 CONFIG_SCHED_CORE_CTL=y
+CONFIG_SCHED_CORE_ROTATE=y
 CONFIG_NAMESPACES=y
 # CONFIG_UTS_NS is not set
 # CONFIG_PID_NS is not set
@@ -503,6 +504,7 @@
 CONFIG_QCOM_RUN_QUEUE_STATS=y
 CONFIG_QCOM_LLCC=y
 CONFIG_QCOM_SDM845_LLCC=y
+CONFIG_QCOM_LLCC_PERFMON=m
 CONFIG_MSM_SERVICE_LOCATOR=y
 CONFIG_MSM_SERVICE_NOTIFIER=y
 CONFIG_MSM_BOOT_STATS=y
@@ -525,7 +527,6 @@
 CONFIG_TRACER_PKT=y
 CONFIG_QTI_RPMH_API=y
 CONFIG_MSM_SMP2P=y
-CONFIG_MSM_SMP2P_TEST=y
 CONFIG_MSM_IPC_ROUTER_GLINK_XPRT=y
 CONFIG_MSM_QMI_INTERFACE=y
 CONFIG_MSM_GLINK_PKT=y
diff --git a/arch/arm64/include/asm/module.h b/arch/arm64/include/asm/module.h
index 06ff7fd..b6c6fa2 100644
--- a/arch/arm64/include/asm/module.h
+++ b/arch/arm64/include/asm/module.h
@@ -22,14 +22,19 @@
 #define MODULE_ARCH_VERMAGIC	"aarch64"
 
 #ifdef CONFIG_ARM64_MODULE_PLTS
-struct mod_arch_specific {
+struct mod_plt_sec {
 	struct elf64_shdr	*plt;
 	int			plt_num_entries;
 	int			plt_max_entries;
 };
+
+struct mod_arch_specific {
+	struct mod_plt_sec	core;
+	struct mod_plt_sec	init;
+};
 #endif
 
-u64 module_emit_plt_entry(struct module *mod, const Elf64_Rela *rela,
+u64 module_emit_plt_entry(struct module *mod, void *loc, const Elf64_Rela *rela,
 			  Elf64_Sym *sym);
 
 #ifdef CONFIG_RANDOMIZE_BASE
diff --git a/arch/arm64/kernel/module-plts.c b/arch/arm64/kernel/module-plts.c
index 1ce90d8..d05dbe6 100644
--- a/arch/arm64/kernel/module-plts.c
+++ b/arch/arm64/kernel/module-plts.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2014-2016 Linaro Ltd. <ard.biesheuvel@linaro.org>
+ * Copyright (C) 2014-2017 Linaro Ltd. <ard.biesheuvel@linaro.org>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -26,35 +26,21 @@ struct plt_entry {
 	__le32	br;	/* br	x16				*/
 };
 
-u64 module_emit_plt_entry(struct module *mod, const Elf64_Rela *rela,
+static bool in_init(const struct module *mod, void *loc)
+{
+	return (u64)loc - (u64)mod->init_layout.base < mod->init_layout.size;
+}
+
+u64 module_emit_plt_entry(struct module *mod, void *loc, const Elf64_Rela *rela,
 			  Elf64_Sym *sym)
 {
-	struct plt_entry *plt = (struct plt_entry *)mod->arch.plt->sh_addr;
-	int i = mod->arch.plt_num_entries;
+	struct mod_plt_sec *pltsec = !in_init(mod, loc) ? &mod->arch.core :
+							  &mod->arch.init;
+	struct plt_entry *plt = (struct plt_entry *)pltsec->plt->sh_addr;
+	int i = pltsec->plt_num_entries;
 	u64 val = sym->st_value + rela->r_addend;
 
 	/*
-	 * We only emit PLT entries against undefined (SHN_UNDEF) symbols,
-	 * which are listed in the ELF symtab section, but without a type
-	 * or a size.
-	 * So, similar to how the module loader uses the Elf64_Sym::st_value
-	 * field to store the resolved addresses of undefined symbols, let's
-	 * borrow the Elf64_Sym::st_size field (whose value is never used by
-	 * the module loader, even for symbols that are defined) to record
-	 * the address of a symbol's associated PLT entry as we emit it for a
-	 * zero addend relocation (which is the only kind we have to deal with
-	 * in practice). This allows us to find duplicates without having to
-	 * go through the table every time.
-	 */
-	if (rela->r_addend == 0 && sym->st_size != 0) {
-		BUG_ON(sym->st_size < (u64)plt || sym->st_size >= (u64)&plt[i]);
-		return sym->st_size;
-	}
-
-	mod->arch.plt_num_entries++;
-	BUG_ON(mod->arch.plt_num_entries > mod->arch.plt_max_entries);
-
-	/*
 	 * MOVK/MOVN/MOVZ opcode:
 	 * +--------+------------+--------+-----------+-------------+---------+
 	 * | sf[31] | opc[30:29] | 100101 | hw[22:21] | imm16[20:5] | Rd[4:0] |
@@ -72,8 +58,19 @@ u64 module_emit_plt_entry(struct module *mod, const Elf64_Rela *rela,
 		cpu_to_le32(0xd61f0200)
 	};
 
-	if (rela->r_addend == 0)
-		sym->st_size = (u64)&plt[i];
+	/*
+	 * Check if the entry we just created is a duplicate. Given that the
+	 * relocations are sorted, this will be the last entry we allocated.
+	 * (if one exists).
+	 */
+	if (i > 0 &&
+	    plt[i].mov0 == plt[i - 1].mov0 &&
+	    plt[i].mov1 == plt[i - 1].mov1 &&
+	    plt[i].mov2 == plt[i - 1].mov2)
+		return (u64)&plt[i - 1];
+
+	pltsec->plt_num_entries++;
+	BUG_ON(pltsec->plt_num_entries > pltsec->plt_max_entries);
 
 	return (u64)&plt[i];
 }
@@ -104,7 +101,8 @@ static bool duplicate_rel(const Elf64_Rela *rela, int num)
 	return num > 0 && cmp_rela(rela + num, rela + num - 1) == 0;
 }
 
-static unsigned int count_plts(Elf64_Sym *syms, Elf64_Rela *rela, int num)
+static unsigned int count_plts(Elf64_Sym *syms, Elf64_Rela *rela, int num,
+			       Elf64_Word dstidx)
 {
 	unsigned int ret = 0;
 	Elf64_Sym *s;
@@ -116,13 +114,17 @@ static unsigned int count_plts(Elf64_Sym *syms, Elf64_Rela *rela, int num)
 		case R_AARCH64_CALL26:
 			/*
 			 * We only have to consider branch targets that resolve
-			 * to undefined symbols. This is not simply a heuristic,
-			 * it is a fundamental limitation, since the PLT itself
-			 * is part of the module, and needs to be within 128 MB
-			 * as well, so modules can never grow beyond that limit.
+			 * to symbols that are defined in a different section.
+			 * This is not simply a heuristic, it is a fundamental
+			 * limitation, since there is no guaranteed way to emit
+			 * PLT entries sufficiently close to the branch if the
+			 * section size exceeds the range of a branch
+			 * instruction. So ignore relocations against defined
+			 * symbols if they live in the same section as the
+			 * relocation target.
 			 */
 			s = syms + ELF64_R_SYM(rela[i].r_info);
-			if (s->st_shndx != SHN_UNDEF)
+			if (s->st_shndx == dstidx)
 				break;
 
 			/*
@@ -149,7 +151,8 @@ static unsigned int count_plts(Elf64_Sym *syms, Elf64_Rela *rela, int num)
 int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
 			      char *secstrings, struct module *mod)
 {
-	unsigned long plt_max_entries = 0;
+	unsigned long core_plts = 0;
+	unsigned long init_plts = 0;
 	Elf64_Sym *syms = NULL;
 	int i;
 
@@ -158,14 +161,16 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
 	 * entries. Record the symtab address as well.
 	 */
 	for (i = 0; i < ehdr->e_shnum; i++) {
-		if (strcmp(".plt", secstrings + sechdrs[i].sh_name) == 0)
-			mod->arch.plt = sechdrs + i;
+		if (!strcmp(secstrings + sechdrs[i].sh_name, ".plt"))
+			mod->arch.core.plt = sechdrs + i;
+		else if (!strcmp(secstrings + sechdrs[i].sh_name, ".init.plt"))
+			mod->arch.init.plt = sechdrs + i;
 		else if (sechdrs[i].sh_type == SHT_SYMTAB)
 			syms = (Elf64_Sym *)sechdrs[i].sh_addr;
 	}
 
-	if (!mod->arch.plt) {
-		pr_err("%s: module PLT section missing\n", mod->name);
+	if (!mod->arch.core.plt || !mod->arch.init.plt) {
+		pr_err("%s: module PLT section(s) missing\n", mod->name);
 		return -ENOEXEC;
 	}
 	if (!syms) {
@@ -188,14 +193,27 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
 		/* sort by type, symbol index and addend */
 		sort(rels, numrels, sizeof(Elf64_Rela), cmp_rela, NULL);
 
-		plt_max_entries += count_plts(syms, rels, numrels);
+		if (strncmp(secstrings + dstsec->sh_name, ".init", 5) != 0)
+			core_plts += count_plts(syms, rels, numrels,
+						sechdrs[i].sh_info);
+		else
+			init_plts += count_plts(syms, rels, numrels,
+						sechdrs[i].sh_info);
 	}
 
-	mod->arch.plt->sh_type = SHT_NOBITS;
-	mod->arch.plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
-	mod->arch.plt->sh_addralign = L1_CACHE_BYTES;
-	mod->arch.plt->sh_size = plt_max_entries * sizeof(struct plt_entry);
-	mod->arch.plt_num_entries = 0;
-	mod->arch.plt_max_entries = plt_max_entries;
+	mod->arch.core.plt->sh_type = SHT_NOBITS;
+	mod->arch.core.plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
+	mod->arch.core.plt->sh_addralign = L1_CACHE_BYTES;
+	mod->arch.core.plt->sh_size = (core_plts  + 1) * sizeof(struct plt_entry);
+	mod->arch.core.plt_num_entries = 0;
+	mod->arch.core.plt_max_entries = core_plts;
+
+	mod->arch.init.plt->sh_type = SHT_NOBITS;
+	mod->arch.init.plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
+	mod->arch.init.plt->sh_addralign = L1_CACHE_BYTES;
+	mod->arch.init.plt->sh_size = (init_plts + 1) * sizeof(struct plt_entry);
+	mod->arch.init.plt_num_entries = 0;
+	mod->arch.init.plt_max_entries = init_plts;
+
 	return 0;
 }
diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
index 7f31698..c9a2ab4 100644
--- a/arch/arm64/kernel/module.c
+++ b/arch/arm64/kernel/module.c
@@ -380,7 +380,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
 
 			if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
 			    ovf == -ERANGE) {
-				val = module_emit_plt_entry(me, &rel[i], sym);
+				val = module_emit_plt_entry(me, loc, &rel[i], sym);
 				ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2,
 						     26, AARCH64_INSN_IMM_26);
 			}
diff --git a/arch/arm64/kernel/module.lds b/arch/arm64/kernel/module.lds
index 8949f6c..f7c9781 100644
--- a/arch/arm64/kernel/module.lds
+++ b/arch/arm64/kernel/module.lds
@@ -1,3 +1,4 @@
 SECTIONS {
 	.plt (NOLOAD) : { BYTE(0) }
+	.init.plt (NOLOAD) : { BYTE(0) }
 }
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 1ac8008..fcf85be 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -2743,6 +2743,48 @@ static bool binder_proc_transaction(struct binder_transaction *t,
 	return true;
 }
 
+/**
+ * binder_get_node_refs_for_txn() - Get required refs on node for txn
+ * @node:         struct binder_node for which to get refs
+ * @proc:         returns @node->proc if valid
+ * @error:        if no @proc then returns BR_DEAD_REPLY
+ *
+ * User-space normally keeps the node alive when creating a transaction
+ * since it has a reference to the target. The local strong ref keeps it
+ * alive if the sending process dies before the target process processes
+ * the transaction. If the source process is malicious or has a reference
+ * counting bug, relying on the local strong ref can fail.
+ *
+ * Since user-space can cause the local strong ref to go away, we also take
+ * a tmpref on the node to ensure it survives while we are constructing
+ * the transaction. We also need a tmpref on the proc while we are
+ * constructing the transaction, so we take that here as well.
+ *
+ * Return: The target_node with refs taken or NULL if no @node->proc is NULL.
+ * Also sets @proc if valid. If the @node->proc is NULL indicating that the
+ * target proc has died, @error is set to BR_DEAD_REPLY
+ */
+static struct binder_node *binder_get_node_refs_for_txn(
+		struct binder_node *node,
+		struct binder_proc **procp,
+		uint32_t *error)
+{
+	struct binder_node *target_node = NULL;
+
+	binder_node_inner_lock(node);
+	if (node->proc) {
+		target_node = node;
+		binder_inc_node_nilocked(node, 1, 0, NULL);
+		binder_inc_node_tmpref_ilocked(node);
+		node->proc->tmp_ref++;
+		*procp = node->proc;
+	} else
+		*error = BR_DEAD_REPLY;
+	binder_node_inner_unlock(node);
+
+	return target_node;
+}
+
 static void binder_transaction(struct binder_proc *proc,
 			       struct binder_thread *thread,
 			       struct binder_transaction_data *tr, int reply,
@@ -2845,43 +2887,35 @@ static void binder_transaction(struct binder_proc *proc,
 			ref = binder_get_ref_olocked(proc, tr->target.handle,
 						     true);
 			if (ref) {
-				binder_inc_node(ref->node, 1, 0, NULL);
-				target_node = ref->node;
+				target_node = binder_get_node_refs_for_txn(
+						ref->node, &target_proc,
+						&return_error);
+			} else {
+				binder_user_error("%d:%d got transaction to invalid handle\n",
+						  proc->pid, thread->pid);
+				return_error = BR_FAILED_REPLY;
 			}
 			binder_proc_unlock(proc);
-			if (target_node == NULL) {
-				binder_user_error("%d:%d got transaction to invalid handle\n",
-					proc->pid, thread->pid);
-				return_error = BR_FAILED_REPLY;
-				return_error_param = -EINVAL;
-				return_error_line = __LINE__;
-				goto err_invalid_target_handle;
-			}
 		} else {
 			mutex_lock(&context->context_mgr_node_lock);
 			target_node = context->binder_context_mgr_node;
-			if (target_node == NULL) {
+			if (target_node)
+				target_node = binder_get_node_refs_for_txn(
+						target_node, &target_proc,
+						&return_error);
+			else
 				return_error = BR_DEAD_REPLY;
-				mutex_unlock(&context->context_mgr_node_lock);
-				return_error_line = __LINE__;
-				goto err_no_context_mgr_node;
-			}
-			binder_inc_node(target_node, 1, 0, NULL);
 			mutex_unlock(&context->context_mgr_node_lock);
 		}
-		e->to_node = target_node->debug_id;
-		binder_node_lock(target_node);
-		target_proc = target_node->proc;
-		if (target_proc == NULL) {
-			binder_node_unlock(target_node);
-			return_error = BR_DEAD_REPLY;
+		if (!target_node) {
+			/*
+			 * return_error is set above
+			 */
+			return_error_param = -EINVAL;
 			return_error_line = __LINE__;
 			goto err_dead_binder;
 		}
-		binder_inner_proc_lock(target_proc);
-		target_proc->tmp_ref++;
-		binder_inner_proc_unlock(target_proc);
-		binder_node_unlock(target_node);
+		e->to_node = target_node->debug_id;
 		if (security_binder_transaction(proc->tsk,
 						target_proc->tsk) < 0) {
 			return_error = BR_FAILED_REPLY;
@@ -3240,6 +3274,8 @@ static void binder_transaction(struct binder_proc *proc,
 	if (target_thread)
 		binder_thread_dec_tmpref(target_thread);
 	binder_proc_dec_tmpref(target_proc);
+	if (target_node)
+		binder_dec_node_tmpref(target_node);
 	/*
 	 * write barrier to synchronize with initialization
 	 * of log entry
@@ -3259,6 +3295,8 @@ static void binder_transaction(struct binder_proc *proc,
 err_copy_data_failed:
 	trace_binder_transaction_failed_buffer_release(t->buffer);
 	binder_transaction_buffer_release(target_proc, t->buffer, offp);
+	if (target_node)
+		binder_dec_node_tmpref(target_node);
 	target_node = NULL;
 	t->buffer->transaction = NULL;
 	binder_alloc_free_buf(&target_proc->alloc, t->buffer);
@@ -3273,13 +3311,14 @@ static void binder_transaction(struct binder_proc *proc,
 err_empty_call_stack:
 err_dead_binder:
 err_invalid_target_handle:
-err_no_context_mgr_node:
 	if (target_thread)
 		binder_thread_dec_tmpref(target_thread);
 	if (target_proc)
 		binder_proc_dec_tmpref(target_proc);
-	if (target_node)
+	if (target_node) {
 		binder_dec_node(target_node, 1, 0);
+		binder_dec_node_tmpref(target_node);
+	}
 
 	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
 		     "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
diff --git a/drivers/char/diag/diagchar.h b/drivers/char/diag/diagchar.h
index ac3c1fd..c9ae689 100644
--- a/drivers/char/diag/diagchar.h
+++ b/drivers/char/diag/diagchar.h
@@ -21,7 +21,7 @@
 #include <linux/spinlock.h>
 #include <linux/workqueue.h>
 #include <linux/sched.h>
-#include <linux/wakelock.h>
+#include <linux/device.h>
 #include <linux/atomic.h>
 #include "diagfwd_bridge.h"
 
diff --git a/drivers/char/diag/diagchar_core.c b/drivers/char/diag/diagchar_core.c
index 18f941d..543f0a2 100644
--- a/drivers/char/diag/diagchar_core.c
+++ b/drivers/char/diag/diagchar_core.c
@@ -2368,7 +2368,9 @@ long diagchar_ioctl(struct file *filp,
 		mutex_unlock(&driver->dci_mutex);
 		break;
 	case DIAG_IOCTL_DCI_EVENT_STATUS:
+		mutex_lock(&driver->dci_mutex);
 		result = diag_ioctl_dci_event_status(ioarg);
+		mutex_unlock(&driver->dci_mutex);
 		break;
 	case DIAG_IOCTL_DCI_CLEAR_LOGS:
 		mutex_lock(&driver->dci_mutex);
diff --git a/drivers/clk/qcom/camcc-sdm845.c b/drivers/clk/qcom/camcc-sdm845.c
index 62ae8c5..5caa975 100644
--- a/drivers/clk/qcom/camcc-sdm845.c
+++ b/drivers/clk/qcom/camcc-sdm845.c
@@ -1973,6 +1973,7 @@ static void cam_cc_sdm845_fixup_sdm845v2(void)
 		&cam_cc_csi3phytimer_clk_src.clkr;
 	cam_cc_cphy_rx_clk_src.freq_tbl = ftbl_cam_cc_cphy_rx_clk_src_sdm845_v2;
 	cam_cc_cphy_rx_clk_src.clkr.hw.init->rate_max[VDD_CX_LOWER] = 384000000;
+	cam_cc_cphy_rx_clk_src.clkr.hw.init->rate_max[VDD_CX_LOW] = 384000000;
 	cam_cc_fd_core_clk_src.freq_tbl = ftbl_cam_cc_fd_core_clk_src_sdm845_v2;
 	cam_cc_fd_core_clk_src.clkr.hw.init->rate_max[VDD_CX_LOWER] = 384000000;
 	cam_cc_icp_clk_src.freq_tbl = ftbl_cam_cc_icp_clk_src_sdm845_v2;
@@ -1985,6 +1986,9 @@ static void cam_cc_sdm845_fixup_sdm845v2(void)
 	cam_cc_lrme_clk_src.clkr.hw.init->rate_max[VDD_CX_LOW_L1] = 320000000;
 	cam_cc_lrme_clk_src.clkr.hw.init->rate_max[VDD_CX_NOMINAL] = 400000000;
 	cam_cc_slow_ahb_clk_src.clkr.hw.init->rate_max[VDD_CX_LOWER] = 80000000;
+	cam_cc_slow_ahb_clk_src.clkr.hw.init->rate_max[VDD_CX_LOW] = 80000000;
+	cam_cc_slow_ahb_clk_src.clkr.hw.init->rate_max[VDD_CX_LOW_L1] =
+		80000000;
 }
 
 static void cam_cc_sdm845_fixup_sdm670(void)
diff --git a/drivers/clk/qcom/clk-cpu-osm.c b/drivers/clk/qcom/clk-cpu-osm.c
index 258a6f2..93a08db 100644
--- a/drivers/clk/qcom/clk-cpu-osm.c
+++ b/drivers/clk/qcom/clk-cpu-osm.c
@@ -2060,7 +2060,7 @@ static int clk_osm_get_lut(struct platform_device *pdev,
 			last_entry = true;
 		}
 	}
-	fmax_temp[k] = abs_fmax;
+	fmax_temp[k++] = abs_fmax;
 
 	osm_clks_init[c->cluster_num].rate_max = devm_kzalloc(&pdev->dev,
 						 k * sizeof(unsigned long),
diff --git a/drivers/clk/qcom/dispcc-sdm845.c b/drivers/clk/qcom/dispcc-sdm845.c
index b256157..3b13c9b 100644
--- a/drivers/clk/qcom/dispcc-sdm845.c
+++ b/drivers/clk/qcom/dispcc-sdm845.c
@@ -1034,19 +1034,19 @@ MODULE_DEVICE_TABLE(of, disp_cc_sdm845_match_table);
 static void disp_cc_sdm845_fixup_sdm845v2(struct regmap *regmap)
 {
 	clk_fabia_pll_configure(&disp_cc_pll0, regmap,
-					&disp_cc_pll0_config_v2);
+		&disp_cc_pll0_config_v2);
 	disp_cc_mdss_byte0_clk_src.clkr.hw.init->rate_max[VDD_CX_LOWER] =
 		180000000;
 	disp_cc_mdss_byte0_clk_src.clkr.hw.init->rate_max[VDD_CX_LOW] =
 		275000000;
 	disp_cc_mdss_byte0_clk_src.clkr.hw.init->rate_max[VDD_CX_LOW_L1] =
-		358000000;
+		328580000;
 	disp_cc_mdss_byte1_clk_src.clkr.hw.init->rate_max[VDD_CX_LOWER] =
 		180000000;
 	disp_cc_mdss_byte1_clk_src.clkr.hw.init->rate_max[VDD_CX_LOW] =
 		275000000;
 	disp_cc_mdss_byte1_clk_src.clkr.hw.init->rate_max[VDD_CX_LOW_L1] =
-		358000000;
+		328580000;
 	disp_cc_mdss_dp_pixel1_clk_src.clkr.hw.init->rate_max[VDD_CX_LOW] =
 		337500;
 	disp_cc_mdss_dp_pixel_clk_src.clkr.hw.init->rate_max[VDD_CX_LOW] =
@@ -1063,10 +1063,14 @@ static void disp_cc_sdm845_fixup_sdm845v2(struct regmap *regmap)
 		280000000;
 	disp_cc_mdss_pclk0_clk_src.clkr.hw.init->rate_max[VDD_CX_LOW] =
 		430000000;
+	disp_cc_mdss_pclk0_clk_src.clkr.hw.init->rate_max[VDD_CX_LOW_L1] =
+		430000000;
 	disp_cc_mdss_pclk1_clk_src.clkr.hw.init->rate_max[VDD_CX_LOWER] =
 		280000000;
 	disp_cc_mdss_pclk1_clk_src.clkr.hw.init->rate_max[VDD_CX_LOW] =
 		430000000;
+	disp_cc_mdss_pclk1_clk_src.clkr.hw.init->rate_max[VDD_CX_LOW_L1] =
+		430000000;
 	disp_cc_mdss_rot_clk_src.freq_tbl =
 		ftbl_disp_cc_mdss_rot_clk_src_sdm845_v2;
 	disp_cc_mdss_rot_clk_src.clkr.hw.init->rate_max[VDD_CX_LOWER] =
@@ -1083,6 +1087,10 @@ static void disp_cc_sdm845_fixup_sdm670(struct regmap *regmap)
 
 	disp_cc_mdss_mdp_clk_src.freq_tbl =
 		ftbl_disp_cc_mdss_mdp_clk_src_sdm670;
+	disp_cc_mdss_byte0_clk_src.clkr.hw.init->rate_max[VDD_CX_LOW_L1] =
+		358000000;
+	disp_cc_mdss_byte1_clk_src.clkr.hw.init->rate_max[VDD_CX_LOW_L1] =
+		358000000;
 }
 
 static int disp_cc_sdm845_fixup(struct platform_device *pdev,
diff --git a/drivers/clk/qcom/gcc-sdm845.c b/drivers/clk/qcom/gcc-sdm845.c
index a363235..4142dd5 100644
--- a/drivers/clk/qcom/gcc-sdm845.c
+++ b/drivers/clk/qcom/gcc-sdm845.c
@@ -4180,6 +4180,7 @@ static void gcc_sdm845_fixup_sdm845v2(void)
 		240000000;
 	gcc_ufs_phy_axi_clk_src.freq_tbl =
 		ftbl_gcc_ufs_card_axi_clk_src_sdm845_v2;
+	gcc_vsensor_clk_src.clkr.hw.init->rate_max[VDD_CX_LOW] = 600000000;
 }
 
 static void gcc_sdm845_fixup_sdm670(void)
@@ -4250,16 +4251,14 @@ static void gcc_sdm845_fixup_sdm670(void)
 
 	gcc_cpuss_rbcpr_clk_src.freq_tbl = ftbl_gcc_cpuss_rbcpr_clk_src_sdm670;
 	gcc_cpuss_rbcpr_clk_src.clkr.hw.init->rate_max[VDD_CX_NOMINAL] =
-					50000000;
-	gcc_sdcc2_apps_clk_src.clkr.hw.init->rate_max[VDD_CX_LOWER] =
-					50000000;
+		50000000;
+	gcc_sdcc2_apps_clk_src.clkr.hw.init->rate_max[VDD_CX_LOWER] = 50000000;
 	gcc_sdcc2_apps_clk_src.clkr.hw.init->rate_max[VDD_CX_LOW_L1] =
-					100000000;
+		100000000;
 	gcc_sdcc2_apps_clk_src.clkr.hw.init->rate_max[VDD_CX_NOMINAL] =
-					201500000;
+		201500000;
 	gcc_sdcc4_apps_clk_src.freq_tbl = ftbl_gcc_sdcc4_apps_clk_src_sdm670;
-	gcc_sdcc4_apps_clk_src.clkr.hw.init->rate_max[VDD_CX_LOWER] =
-					33333333;
+	gcc_sdcc4_apps_clk_src.clkr.hw.init->rate_max[VDD_CX_LOWER] = 33333333;
 }
 
 static int gcc_sdm845_fixup(struct platform_device *pdev)
diff --git a/drivers/clk/qcom/gpucc-sdm845.c b/drivers/clk/qcom/gpucc-sdm845.c
index db0dad1..35a23f7 100644
--- a/drivers/clk/qcom/gpucc-sdm845.c
+++ b/drivers/clk/qcom/gpucc-sdm845.c
@@ -631,7 +631,7 @@ static void gpu_cc_sdm845_fixup_sdm670(struct regmap *regmap)
 	clk_fabia_pll_configure(&gpu_cc_pll1, regmap, &gpu_cc_pll1_config);
 
 	gpu_cc_gmu_clk_src.freq_tbl = ftbl_gpu_cc_gmu_clk_src_sdm670;
-	gpu_cc_gmu_clk_src.clkr.hw.init->rate_max[VDD_CX_LOW] = 0;
+	gpu_cc_gmu_clk_src.clkr.hw.init->rate_max[VDD_CX_LOW] = 200000000;
 }
 
 static void gpu_cc_gfx_sdm845_fixup_sdm845v2(void)
@@ -640,39 +640,36 @@ static void gpu_cc_gfx_sdm845_fixup_sdm845v2(void)
 				ftbl_gpu_cc_gx_gfx3d_clk_src_sdm845_v2;
 	gpu_cc_gx_gfx3d_clk_src.clkr.hw.init->rate_max[VDD_GX_MIN] = 180000000;
 	gpu_cc_gx_gfx3d_clk_src.clkr.hw.init->rate_max[VDD_GX_LOWER] =
-				257000000;
+		257000000;
 	gpu_cc_gx_gfx3d_clk_src.clkr.hw.init->rate_max[VDD_GX_LOW] = 342000000;
 	gpu_cc_gx_gfx3d_clk_src.clkr.hw.init->rate_max[VDD_GX_LOW_L1] =
-				414000000;
+		414000000;
 	gpu_cc_gx_gfx3d_clk_src.clkr.hw.init->rate_max[VDD_GX_NOMINAL] =
-				520000000;
+		520000000;
 	gpu_cc_gx_gfx3d_clk_src.clkr.hw.init->rate_max[VDD_GX_NOMINAL_L1] =
-				596000000;
+		596000000;
 	gpu_cc_gx_gfx3d_clk_src.clkr.hw.init->rate_max[VDD_GX_HIGH] = 675000000;
 	gpu_cc_gx_gfx3d_clk_src.clkr.hw.init->rate_max[VDD_GX_HIGH_L1] =
-				710000000;
+		710000000;
 }
 
 static void gpu_cc_gfx_sdm845_fixup_sdm670(void)
 {
 	gpu_cc_gx_gfx3d_clk_src.freq_tbl =
 				ftbl_gpu_cc_gx_gfx3d_clk_src_sdm670;
-	gpu_cc_gx_gfx3d_clk_src.clkr.hw.init->rate_max[VDD_GX_MIN] =
-				180000000;
+	gpu_cc_gx_gfx3d_clk_src.clkr.hw.init->rate_max[VDD_GX_MIN] = 180000000;
 	gpu_cc_gx_gfx3d_clk_src.clkr.hw.init->rate_max[VDD_GX_LOWER] =
-				267000000;
-	gpu_cc_gx_gfx3d_clk_src.clkr.hw.init->rate_max[VDD_GX_LOW] =
-				355000000;
+		267000000;
+	gpu_cc_gx_gfx3d_clk_src.clkr.hw.init->rate_max[VDD_GX_LOW] = 355000000;
 	gpu_cc_gx_gfx3d_clk_src.clkr.hw.init->rate_max[VDD_GX_LOW_L1] =
-				430000000;
+		430000000;
 	gpu_cc_gx_gfx3d_clk_src.clkr.hw.init->rate_max[VDD_GX_NOMINAL] =
-				565000000;
+		565000000;
 	gpu_cc_gx_gfx3d_clk_src.clkr.hw.init->rate_max[VDD_GX_NOMINAL_L1] =
-				650000000;
-	gpu_cc_gx_gfx3d_clk_src.clkr.hw.init->rate_max[VDD_GX_HIGH] =
-				750000000;
+		650000000;
+	gpu_cc_gx_gfx3d_clk_src.clkr.hw.init->rate_max[VDD_GX_HIGH] = 750000000;
 	gpu_cc_gx_gfx3d_clk_src.clkr.hw.init->rate_max[VDD_GX_HIGH_L1] =
-				780000000;
+		780000000;
 }
 
 static int gpu_cc_gfx_sdm845_fixup(struct platform_device *pdev)
diff --git a/drivers/crypto/msm/qce50.c b/drivers/crypto/msm/qce50.c
index 35d7542..4426bc7 100644
--- a/drivers/crypto/msm/qce50.c
+++ b/drivers/crypto/msm/qce50.c
@@ -162,7 +162,7 @@ struct qce_device {
 	bool cadence_flag;
 	uint8_t *dummyreq_in_buf;
 	struct dma_iommu_mapping *smmu_mapping;
-	bool bypass_s1_smmu;
+	bool enable_s1_smmu;
 };
 
 static void print_notify_debug(struct sps_event_notify *notify);
@@ -5716,8 +5716,8 @@ static int __qce_get_device_tree_data(struct platform_device *pdev,
 		pce_dev->ce_opp_freq_hz = CE_CLK_100MHZ;
 	}
 
-	if (of_property_read_bool((&pdev->dev)->of_node, "qcom,smmu-s1-bypass"))
-		pce_dev->bypass_s1_smmu = true;
+	if (of_property_read_bool((&pdev->dev)->of_node, "qcom,smmu-s1-enable"))
+		pce_dev->enable_s1_smmu = true;
 
 	pce_dev->ce_bam_info.dest_pipe_index	=
 			2 * pce_dev->ce_bam_info.pipe_pair_index;
@@ -5963,7 +5963,7 @@ static void qce_iommu_release_iomapping(struct qce_device *pce_dev)
 static int qce_smmu_init(struct qce_device *pce_dev)
 {
 	struct dma_iommu_mapping *mapping;
-	int s1_bypass = 1;
+	int attr = 1;
 	int ret = 0;
 
 	mapping = arm_iommu_create_mapping(&platform_bus_type,
@@ -5975,9 +5975,16 @@ static int qce_smmu_init(struct qce_device *pce_dev)
 	}
 
 	ret = iommu_domain_set_attr(mapping->domain,
-				DOMAIN_ATTR_S1_BYPASS, &s1_bypass);
+				DOMAIN_ATTR_ATOMIC, &attr);
 	if (ret < 0) {
-		pr_err("Set s1_bypass attribute failed, err = %d\n", ret);
+		pr_err("Set ATOMIC attr failed, err = %d\n", ret);
+		goto ext_fail_set_attr;
+	}
+
+	ret = iommu_domain_set_attr(mapping->domain,
+				DOMAIN_ATTR_UPSTREAM_IOVA_ALLOCATOR, &attr);
+	if (ret < 0) {
+		pr_err("Set UPSTREAM_IOVA_ALLOCATOR failed, err = %d\n", ret);
 		goto ext_fail_set_attr;
 	}
 
@@ -6020,6 +6027,13 @@ void *qce_open(struct platform_device *pdev, int *rc)
 		goto err_pce_dev;
 	}
 
+	if (pce_dev->enable_s1_smmu) {
+		if (qce_smmu_init(pce_dev)) {
+			*rc = -EIO;
+			goto err_pce_dev;
+		}
+	}
+
 	for (i = 0; i < MAX_QCE_ALLOC_BAM_REQ; i++)
 		atomic_set(&pce_dev->ce_request_info[i].in_use, false);
 	pce_dev->ce_request_index = 0;
@@ -6051,13 +6065,6 @@ void *qce_open(struct platform_device *pdev, int *rc)
 	if (*rc)
 		goto err_enable_clk;
 
-	if (pce_dev->bypass_s1_smmu) {
-		if (qce_smmu_init(pce_dev)) {
-			*rc = -EIO;
-			goto err_smmu;
-		}
-	}
-
 	if (_probe_ce_engine(pce_dev)) {
 		*rc = -ENXIO;
 		goto err;
@@ -6084,9 +6091,6 @@ void *qce_open(struct platform_device *pdev, int *rc)
 	mutex_unlock(&qce_iomap_mutex);
 	return pce_dev;
 err:
-	if (pce_dev->bypass_s1_smmu)
-		qce_iommu_release_iomapping(pce_dev);
-err_smmu:
 	qce_disable_clk(pce_dev);
 
 err_enable_clk:
@@ -6099,6 +6103,9 @@ void *qce_open(struct platform_device *pdev, int *rc)
 		dma_free_coherent(pce_dev->pdev, pce_dev->memsize,
 			pce_dev->coh_vmem, pce_dev->coh_pmem);
 err_iobase:
+	if (pce_dev->enable_s1_smmu)
+		qce_iommu_release_iomapping(pce_dev);
+
 	if (pce_dev->iobase)
 		iounmap(pce_dev->iobase);
 err_pce_dev:
@@ -6128,7 +6135,7 @@ int qce_close(void *handle)
 	kfree(pce_dev->dummyreq_in_buf);
 	kfree(pce_dev->iovec_vmem);
 
-	if (pce_dev->bypass_s1_smmu)
+	if (pce_dev->enable_s1_smmu)
 		qce_iommu_release_iomapping(pce_dev);
 
 	qce_disable_clk(pce_dev);
diff --git a/drivers/devfreq/bimc-bwmon.c b/drivers/devfreq/bimc-bwmon.c
index ffe60de..f9b758f7 100644
--- a/drivers/devfreq/bimc-bwmon.c
+++ b/drivers/devfreq/bimc-bwmon.c
@@ -575,7 +575,9 @@ unsigned long get_zone_count(struct bwmon *m, unsigned int zone,
 		count = readl_relaxed(MON2_ZONE_MAX(m, zone)) + 1;
 		break;
 	case MON3:
-		count = readl_relaxed(MON3_ZONE_MAX(m, zone)) + 1;
+		count = readl_relaxed(MON3_ZONE_MAX(m, zone));
+		if (count)
+			count++;
 		break;
 	}
 
diff --git a/drivers/gpu/drm/msm/dp/dp_debug.c b/drivers/gpu/drm/msm/dp/dp_debug.c
index 25407c4..92ac0ec 100644
--- a/drivers/gpu/drm/msm/dp/dp_debug.c
+++ b/drivers/gpu/drm/msm/dp/dp_debug.c
@@ -74,7 +74,7 @@ static ssize_t dp_debug_write_edid_modes(struct file *file,
 	struct dp_debug_private *debug = file->private_data;
 	char buf[SZ_32];
 	size_t len = 0;
-	int hdisplay = 0, vdisplay = 0, vrefresh = 0;
+	int hdisplay = 0, vdisplay = 0, vrefresh = 0, aspect_ratio;
 
 	if (!debug)
 		return -ENODEV;
@@ -89,7 +89,8 @@ static ssize_t dp_debug_write_edid_modes(struct file *file,
 
 	buf[len] = '\0';
 
-	if (sscanf(buf, "%d %d %d", &hdisplay, &vdisplay, &vrefresh) != 3)
+	if (sscanf(buf, "%d %d %d %d", &hdisplay, &vdisplay, &vrefresh,
+				&aspect_ratio) != 4)
 		goto clear;
 
 	if (!hdisplay || !vdisplay || !vrefresh)
@@ -99,6 +100,7 @@ static ssize_t dp_debug_write_edid_modes(struct file *file,
 	debug->dp_debug.hdisplay = hdisplay;
 	debug->dp_debug.vdisplay = vdisplay;
 	debug->dp_debug.vrefresh = vrefresh;
+	debug->dp_debug.aspect_ratio = aspect_ratio;
 	goto end;
 clear:
 	pr_debug("clearing debug modes\n");
@@ -198,11 +200,11 @@ static ssize_t dp_debug_read_edid_modes(struct file *file,
 
 	list_for_each_entry(mode, &connector->modes, head) {
 		len += snprintf(buf + len, SZ_4K - len,
-		"%s %d %d %d %d %d %d %d %d %d 0x%x\n",
-		mode->name, mode->vrefresh, mode->hdisplay,
-		mode->hsync_start, mode->hsync_end, mode->htotal,
-		mode->vdisplay, mode->vsync_start, mode->vsync_end,
-		mode->vtotal, mode->flags);
+		"%s %d %d %d %d %d %d %d %d %d %d 0x%x\n",
+		mode->name, mode->vrefresh, mode->picture_aspect_ratio,
+		mode->hdisplay, mode->hsync_start, mode->hsync_end,
+		mode->htotal, mode->vdisplay, mode->vsync_start,
+		mode->vsync_end, mode->vtotal, mode->flags);
 	}
 
 	if (copy_to_user(user_buff, buf, len)) {
diff --git a/drivers/gpu/drm/msm/dp/dp_debug.h b/drivers/gpu/drm/msm/dp/dp_debug.h
index 7fd5330..d5a9301 100644
--- a/drivers/gpu/drm/msm/dp/dp_debug.h
+++ b/drivers/gpu/drm/msm/dp/dp_debug.h
@@ -28,6 +28,7 @@
  */
 struct dp_debug {
 	bool debug_en;
+	int aspect_ratio;
 	int vdisplay;
 	int hdisplay;
 	int vrefresh;
diff --git a/drivers/gpu/drm/msm/dp/dp_drm.c b/drivers/gpu/drm/msm/dp/dp_drm.c
index 06f8558..254343a 100644
--- a/drivers/gpu/drm/msm/dp/dp_drm.c
+++ b/drivers/gpu/drm/msm/dp/dp_drm.c
@@ -514,25 +514,19 @@ enum drm_mode_status dp_connector_mode_valid(struct drm_connector *connector,
 	dp_disp = display;
 	debug = dp_disp->get_debug(dp_disp);
 
-	if (debug->debug_en) {
-		if (mode->hdisplay == debug->hdisplay &&
-				mode->vdisplay == debug->vdisplay &&
-				mode->vrefresh == debug->vrefresh &&
-				mode->clock <= dp_disp->max_pclk_khz)
-			return MODE_OK;
-		else
-			return MODE_ERROR;
-	} else {
-		if (mode->vrefresh == 0) {
-			int vrefresh = (mode->clock * 1000) /
-				(mode->vtotal * mode->htotal);
-			if (vrefresh > 60)
-				return MODE_BAD;
-		}
+	mode->vrefresh = drm_mode_vrefresh(mode);
 
-		if (mode->clock > dp_disp->max_pclk_khz)
-			return MODE_BAD;
-		else
-			return MODE_OK;
-	}
+	if (mode->vrefresh > 60)
+		return MODE_BAD;
+
+	if (mode->clock > dp_disp->max_pclk_khz)
+		return MODE_BAD;
+
+	if (debug->debug_en && (mode->hdisplay != debug->hdisplay ||
+			mode->vdisplay != debug->vdisplay ||
+			mode->vrefresh != debug->vrefresh ||
+			mode->picture_aspect_ratio != debug->aspect_ratio))
+		return MODE_BAD;
+
+	return MODE_OK;
 }
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 0f48db6..2f9571b 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -1369,7 +1369,9 @@ void msm_mode_object_event_notify(struct drm_mode_object *obj,
 			continue;
 		len = event->length + sizeof(struct drm_msm_event_resp);
 		if (node->base.file_priv->event_space < len) {
-			DRM_ERROR("Insufficient space to notify\n");
+			DRM_ERROR("Insufficient space %d for event %x len %d\n",
+				node->base.file_priv->event_space, event->type,
+				len);
 			continue;
 		}
 		notify = kzalloc(len, GFP_ATOMIC);
diff --git a/drivers/gpu/drm/msm/sde/sde_color_processing.c b/drivers/gpu/drm/msm/sde/sde_color_processing.c
index faef4ce..e9ffb96 100644
--- a/drivers/gpu/drm/msm/sde/sde_color_processing.c
+++ b/drivers/gpu/drm/msm/sde/sde_color_processing.c
@@ -62,6 +62,8 @@ static void dspp_gc_install_property(struct drm_crtc *crtc);
 
 static void dspp_igc_install_property(struct drm_crtc *crtc);
 
+static void dspp_hist_install_property(struct drm_crtc *crtc);
+
 typedef void (*dspp_prop_install_func_t)(struct drm_crtc *crtc);
 
 static dspp_prop_install_func_t dspp_prop_install_func[SDE_DSPP_MAX];
@@ -77,6 +79,8 @@ static void sde_cp_notify_ad_event(struct drm_crtc *crtc_drm, void *arg);
 static void sde_cp_ad_set_prop(struct sde_crtc *sde_crtc,
 		enum ad_property ad_prop);
 
+static void sde_cp_notify_hist_event(struct drm_crtc *crtc_drm, void *arg);
+
 #define setup_dspp_prop_install_funcs(func) \
 do { \
 	func[SDE_DSPP_PCC] = dspp_pcc_install_property; \
@@ -86,6 +90,7 @@ do { \
 	func[SDE_DSPP_GAMUT] = dspp_gamut_install_property; \
 	func[SDE_DSPP_GC] = dspp_gc_install_property; \
 	func[SDE_DSPP_IGC] = dspp_igc_install_property; \
+	func[SDE_DSPP_HIST] = dspp_hist_install_property; \
 } while (0)
 
 typedef void (*lm_prop_install_func_t)(struct drm_crtc *crtc);
@@ -111,7 +116,8 @@ enum {
 	SDE_CP_CRTC_DSPP_SIXZONE,
 	SDE_CP_CRTC_DSPP_GAMUT,
 	SDE_CP_CRTC_DSPP_DITHER,
-	SDE_CP_CRTC_DSPP_HIST,
+	SDE_CP_CRTC_DSPP_HIST_CTRL,
+	SDE_CP_CRTC_DSPP_HIST_IRQ,
 	SDE_CP_CRTC_DSPP_AD,
 	SDE_CP_CRTC_DSPP_VLUT,
 	SDE_CP_CRTC_DSPP_AD_MODE,
@@ -365,6 +371,13 @@ void sde_cp_crtc_init(struct drm_crtc *crtc)
 		return;
 	}
 
+	/* create blob to store histogram data */
+	sde_crtc->hist_blob = drm_property_create_blob(crtc->dev,
+				sizeof(struct drm_msm_hist), NULL);
+	if (IS_ERR(sde_crtc->hist_blob))
+		sde_crtc->hist_blob = NULL;
+
+	mutex_init(&sde_crtc->crtc_cp_lock);
 	INIT_LIST_HEAD(&sde_crtc->active_list);
 	INIT_LIST_HEAD(&sde_crtc->dirty_list);
 	INIT_LIST_HEAD(&sde_crtc->feature_list);
@@ -531,6 +544,77 @@ static void sde_cp_crtc_install_enum_property(struct drm_crtc *crtc,
 	sde_cp_crtc_prop_attach(&prop_attach);
 }
 
+static struct sde_crtc_irq_info *_sde_cp_get_intr_node(u32 event,
+				struct sde_crtc *sde_crtc)
+{
+	bool found = false;
+	struct sde_crtc_irq_info *node = NULL;
+
+	list_for_each_entry(node, &sde_crtc->user_event_list, list) {
+		if (node->event == event) {
+			found = true;
+			break;
+		}
+	}
+
+	if (!found)
+		node = NULL;
+
+	return node;
+}
+
+static void _sde_cp_crtc_enable_hist_irq(struct sde_crtc *sde_crtc)
+{
+	struct drm_crtc *crtc_drm = &sde_crtc->base;
+	struct sde_kms *kms = NULL;
+	struct sde_hw_mixer *hw_lm;
+	struct sde_hw_dspp *hw_dspp = NULL;
+	struct sde_crtc_irq_info *node = NULL;
+	int i, irq_idx, ret = 0;
+	unsigned long flags;
+
+	if (!crtc_drm) {
+		DRM_ERROR("invalid crtc %pK\n", crtc_drm);
+		return;
+	}
+
+	kms = get_kms(crtc_drm);
+
+	for (i = 0; i < sde_crtc->num_mixers; i++) {
+		hw_lm = sde_crtc->mixers[i].hw_lm;
+		hw_dspp = sde_crtc->mixers[i].hw_dspp;
+		if (!hw_lm->cfg.right_mixer)
+			break;
+	}
+
+	if (!hw_dspp) {
+		DRM_ERROR("invalid dspp\n");
+		return;
+	}
+
+	irq_idx = sde_core_irq_idx_lookup(kms, SDE_IRQ_TYPE_HIST_DSPP_DONE,
+					hw_dspp->idx);
+	if (irq_idx < 0) {
+		DRM_ERROR("failed to get irq idx\n");
+		return;
+	}
+
+	spin_lock_irqsave(&sde_crtc->spin_lock, flags);
+	node = _sde_cp_get_intr_node(DRM_EVENT_HISTOGRAM, sde_crtc);
+	spin_unlock_irqrestore(&sde_crtc->spin_lock, flags);
+
+	if (!node)
+		return;
+
+	if (node->state == IRQ_DISABLED) {
+		ret = sde_core_irq_enable(kms, &irq_idx, 1);
+		if (ret)
+			DRM_ERROR("failed to enable irq %d\n", irq_idx);
+		else
+			node->state = IRQ_ENABLED;
+	}
+}
+
 static void sde_cp_crtc_setfeature(struct sde_cp_node *prop_node,
 				   struct sde_crtc *sde_crtc)
 {
@@ -639,6 +723,21 @@ static void sde_cp_crtc_setfeature(struct sde_cp_node *prop_node,
 			}
 			hw_lm->ops.setup_gc(hw_lm, &hw_cfg);
 			break;
+		case SDE_CP_CRTC_DSPP_HIST_CTRL:
+			if (!hw_dspp || !hw_dspp->ops.setup_histogram) {
+				ret = -EINVAL;
+				continue;
+			}
+			hw_dspp->ops.setup_histogram(hw_dspp, &feature_enabled);
+			break;
+		case SDE_CP_CRTC_DSPP_HIST_IRQ:
+			if (!hw_dspp || !hw_lm) {
+				ret = -EINVAL;
+				continue;
+			}
+			if (!hw_lm->cfg.right_mixer)
+				_sde_cp_crtc_enable_hist_irq(sde_crtc);
+			break;
 		case SDE_CP_CRTC_DSPP_AD_MODE:
 			if (!hw_dspp || !hw_dspp->ops.setup_ad) {
 				ret = -EINVAL;
@@ -746,6 +845,8 @@ void sde_cp_crtc_apply_properties(struct drm_crtc *crtc)
 		return;
 	}
 
+	mutex_lock(&sde_crtc->crtc_cp_lock);
+
 	/* Check if dirty lists are empty and ad features are disabled for
 	 * early return. If ad properties are active then we need to issue
 	 * dspp flush.
@@ -754,7 +855,7 @@ void sde_cp_crtc_apply_properties(struct drm_crtc *crtc)
 		list_empty(&sde_crtc->ad_dirty)) {
 		if (list_empty(&sde_crtc->ad_active)) {
 			DRM_DEBUG_DRIVER("Dirty list is empty\n");
-			return;
+			goto exit;
 		}
 		sde_cp_ad_set_prop(sde_crtc, AD_IPC_RESET);
 		set_dspp_flush = true;
@@ -794,6 +895,8 @@ void sde_cp_crtc_apply_properties(struct drm_crtc *crtc)
 			ctl->ops.update_pending_flush(ctl, flush_mask);
 		}
 	}
+exit:
+	mutex_unlock(&sde_crtc->crtc_cp_lock);
 }
 
 void sde_cp_crtc_install_properties(struct drm_crtc *crtc)
@@ -824,13 +927,15 @@ void sde_cp_crtc_install_properties(struct drm_crtc *crtc)
 		return;
 	}
 
+	mutex_lock(&sde_crtc->crtc_cp_lock);
+
 	/**
 	 * Function can be called during the atomic_check with test_only flag
 	 * and actual commit. Allocate properties only if feature list is
 	 * empty during the atomic_check with test_only flag.
 	 */
 	if (!list_empty(&sde_crtc->feature_list))
-		return;
+		goto exit;
 
 	catalog = kms->catalog;
 	priv = crtc->dev->dev_private;
@@ -846,7 +951,7 @@ void sde_cp_crtc_install_properties(struct drm_crtc *crtc)
 		setup_lm_prop_install_funcs(lm_prop_install_func);
 	}
 	if (!priv->cp_property)
-		return;
+		goto exit;
 
 	if (!catalog->dspp_count)
 		goto lm_property;
@@ -862,7 +967,7 @@ void sde_cp_crtc_install_properties(struct drm_crtc *crtc)
 
 lm_property:
 	if (!catalog->mixer_count)
-		return;
+		goto exit;
 
 	/* Check for all the LM properties and attach it to CRTC */
 	features = catalog->mixer[0].features;
@@ -872,6 +977,9 @@ void sde_cp_crtc_install_properties(struct drm_crtc *crtc)
 		if (lm_prop_install_func[i])
 			lm_prop_install_func[i](crtc);
 	}
+exit:
+	mutex_unlock(&sde_crtc->crtc_cp_lock);
+
 }
 
 int sde_cp_crtc_set_property(struct drm_crtc *crtc,
@@ -894,6 +1002,7 @@ int sde_cp_crtc_set_property(struct drm_crtc *crtc,
 		return -EINVAL;
 	}
 
+	mutex_lock(&sde_crtc->crtc_cp_lock);
 	list_for_each_entry(prop_node, &sde_crtc->feature_list, feature_list) {
 		if (property->base.id == prop_node->property_id) {
 			found = 1;
@@ -902,7 +1011,8 @@ int sde_cp_crtc_set_property(struct drm_crtc *crtc,
 	}
 
 	if (!found)
-		return 0;
+		goto exit;
+
 	/**
 	 * sde_crtc is virtual ensure that hardware has been attached to the
 	 * crtc. Check LM and dspp counts based on whether feature is a
@@ -912,7 +1022,8 @@ int sde_cp_crtc_set_property(struct drm_crtc *crtc,
 	    sde_crtc->num_mixers > ARRAY_SIZE(sde_crtc->mixers)) {
 		DRM_ERROR("Invalid mixer config act cnt %d max cnt %ld\n",
 			sde_crtc->num_mixers, ARRAY_SIZE(sde_crtc->mixers));
-		return -EINVAL;
+		ret = -EINVAL;
+		goto exit;
 	}
 
 	dspp_cnt = 0;
@@ -927,17 +1038,19 @@ int sde_cp_crtc_set_property(struct drm_crtc *crtc,
 	if (prop_node->is_dspp_feature && dspp_cnt < sde_crtc->num_mixers) {
 		DRM_ERROR("invalid dspp cnt %d mixer cnt %d\n", dspp_cnt,
 			sde_crtc->num_mixers);
-		return -EINVAL;
+		ret = -EINVAL;
+		goto exit;
 	} else if (lm_cnt < sde_crtc->num_mixers) {
 		DRM_ERROR("invalid lm cnt %d mixer cnt %d\n", lm_cnt,
 			sde_crtc->num_mixers);
-		return -EINVAL;
+		ret = -EINVAL;
+		goto exit;
 	}
 
 	ret = sde_cp_ad_validate_prop(prop_node, sde_crtc);
 	if (ret) {
 		DRM_ERROR("ad property validation failed ret %d\n", ret);
-		return ret;
+		goto exit;
 	}
 
 	/* remove the property from dirty list */
@@ -955,6 +1068,8 @@ int sde_cp_crtc_set_property(struct drm_crtc *crtc,
 		/* Mark the feature as dirty */
 		sde_cp_update_list(prop_node, sde_crtc, true);
 	}
+exit:
+	mutex_unlock(&sde_crtc->crtc_cp_lock);
 	return ret;
 }
 
@@ -977,12 +1092,14 @@ int sde_cp_crtc_get_property(struct drm_crtc *crtc,
 	}
 	/* Return 0 if property is not supported */
 	*val = 0;
+	mutex_lock(&sde_crtc->crtc_cp_lock);
 	list_for_each_entry(prop_node, &sde_crtc->feature_list, feature_list) {
 		if (property->base.id == prop_node->property_id) {
 			*val = prop_node->prop_val;
 			break;
 		}
 	}
+	mutex_unlock(&sde_crtc->crtc_cp_lock);
 	return 0;
 }
 
@@ -1015,6 +1132,10 @@ void sde_cp_crtc_destroy_properties(struct drm_crtc *crtc)
 		kfree(prop_node);
 	}
 
+	if (sde_crtc->hist_blob)
+		drm_property_unreference_blob(sde_crtc->hist_blob);
+
+	mutex_destroy(&sde_crtc->crtc_cp_lock);
 	INIT_LIST_HEAD(&sde_crtc->active_list);
 	INIT_LIST_HEAD(&sde_crtc->dirty_list);
 	INIT_LIST_HEAD(&sde_crtc->feature_list);
@@ -1035,6 +1156,7 @@ void sde_cp_crtc_suspend(struct drm_crtc *crtc)
 		return;
 	}
 
+	mutex_lock(&sde_crtc->crtc_cp_lock);
 	list_for_each_entry_safe(prop_node, n, &sde_crtc->active_list,
 				 active_list) {
 		sde_cp_update_list(prop_node, sde_crtc, true);
@@ -1046,6 +1168,7 @@ void sde_cp_crtc_suspend(struct drm_crtc *crtc)
 		sde_cp_update_list(prop_node, sde_crtc, true);
 		list_del_init(&prop_node->active_list);
 	}
+	mutex_unlock(&sde_crtc->crtc_cp_lock);
 }
 
 void sde_cp_crtc_resume(struct drm_crtc *crtc)
@@ -1273,6 +1396,30 @@ static void dspp_igc_install_property(struct drm_crtc *crtc)
 	}
 }
 
+static void dspp_hist_install_property(struct drm_crtc *crtc)
+{
+	struct sde_kms *kms = NULL;
+	struct sde_mdss_cfg *catalog = NULL;
+	u32 version;
+
+	kms = get_kms(crtc);
+	catalog = kms->catalog;
+
+	version = catalog->dspp[0].sblk->hist.version >> 16;
+	switch (version) {
+	case 1:
+		sde_cp_crtc_install_enum_property(crtc,
+			SDE_CP_CRTC_DSPP_HIST_CTRL, sde_hist_modes,
+			ARRAY_SIZE(sde_hist_modes), "SDE_DSPP_HIST_CTRL_V1");
+		sde_cp_crtc_install_range_property(crtc, "SDE_DSPP_HIST_IRQ_V1",
+			SDE_CP_CRTC_DSPP_HIST_IRQ, 0, U16_MAX, 0);
+		break;
+	default:
+		DRM_ERROR("version %d not supported\n", version);
+		break;
+	}
+}
+
 static void sde_cp_update_list(struct sde_cp_node *prop_node,
 		struct sde_crtc *crtc, bool dirty_list)
 {
@@ -1394,6 +1541,7 @@ int sde_cp_ad_interrupt(struct drm_crtc *crtc_drm, bool en,
 	int i;
 	int irq_idx, ret;
 	struct sde_cp_node prop_node;
+	struct sde_crtc_irq_info *node = NULL;
 
 	if (!crtc_drm || !ad_irq) {
 		DRM_ERROR("invalid crtc %pK irq %pK\n", crtc_drm, ad_irq);
@@ -1438,8 +1586,23 @@ int sde_cp_ad_interrupt(struct drm_crtc *crtc_drm, bool en,
 		goto exit;
 	}
 
+	node = _sde_cp_get_intr_node(DRM_EVENT_AD_BACKLIGHT, crtc);
+
 	if (!en) {
-		sde_core_irq_disable(kms, &irq_idx, 1);
+		if (node) {
+			if (node->state == IRQ_ENABLED) {
+				ret = sde_core_irq_disable(kms, &irq_idx, 1);
+				if (ret)
+					DRM_ERROR("disable irq %d error %d\n",
+						irq_idx, ret);
+				else
+					node->state = IRQ_NOINIT;
+			} else {
+				node->state = IRQ_NOINIT;
+			}
+		} else {
+			DRM_ERROR("failed to get node from crtc event list\n");
+		}
 		sde_core_irq_unregister_callback(kms, irq_idx, ad_irq);
 		ret = 0;
 		goto exit;
@@ -1452,10 +1615,30 @@ int sde_cp_ad_interrupt(struct drm_crtc *crtc_drm, bool en,
 		DRM_ERROR("failed to register the callback ret %d\n", ret);
 		goto exit;
 	}
-	ret = sde_core_irq_enable(kms, &irq_idx, 1);
-	if (ret) {
-		DRM_ERROR("failed to enable irq ret %d\n", ret);
-		sde_core_irq_unregister_callback(kms, irq_idx, ad_irq);
+
+	if (node) {
+		/* device resume or resume from IPC cases */
+		if (node->state == IRQ_DISABLED || node->state == IRQ_NOINIT) {
+			ret = sde_core_irq_enable(kms, &irq_idx, 1);
+			if (ret) {
+				DRM_ERROR("enable irq %d error %d\n",
+					irq_idx, ret);
+				sde_core_irq_unregister_callback(kms,
+					irq_idx, ad_irq);
+			} else {
+				node->state = IRQ_ENABLED;
+			}
+		}
+	} else {
+		/* request from userspace to register the event
+		 * in this case, node has not been added into the event list
+		 */
+		ret = sde_core_irq_enable(kms, &irq_idx, 1);
+		if (ret) {
+			DRM_ERROR("failed to enable irq ret %d\n", ret);
+			sde_core_irq_unregister_callback(kms,
+				irq_idx, ad_irq);
+		}
 	}
 exit:
 	return ret;
@@ -1518,3 +1701,201 @@ void sde_cp_crtc_post_ipc(struct drm_crtc *drm_crtc)
 
 	sde_cp_ad_set_prop(sde_crtc, AD_IPC_RESUME);
 }
+
+static void sde_cp_hist_interrupt_cb(void *arg, int irq_idx)
+{
+	struct sde_crtc *crtc = arg;
+	struct drm_crtc *crtc_drm = &crtc->base;
+	struct sde_hw_dspp *hw_dspp;
+	struct sde_kms *kms;
+	struct sde_crtc_irq_info *node = NULL;
+	u32 i;
+	int ret = 0;
+	unsigned long flags;
+
+	/* disable histogram irq */
+	kms = get_kms(crtc_drm);
+	spin_lock_irqsave(&crtc->spin_lock, flags);
+	node = _sde_cp_get_intr_node(DRM_EVENT_HISTOGRAM, crtc);
+	spin_unlock_irqrestore(&crtc->spin_lock, flags);
+
+	if (!node) {
+		DRM_ERROR("cannot find histogram event node in crtc\n");
+		return;
+	}
+
+	if (node->state == IRQ_ENABLED) {
+		if (sde_core_irq_disable_nolock(kms, irq_idx)) {
+			DRM_ERROR("failed to disable irq %d, ret %d\n",
+				irq_idx, ret);
+			return;
+		}
+		node->state = IRQ_DISABLED;
+	}
+
+	/* lock histogram buffer */
+	for (i = 0; i < crtc->num_mixers; i++) {
+		hw_dspp = crtc->mixers[i].hw_dspp;
+		if (hw_dspp && hw_dspp->ops.lock_histogram)
+			hw_dspp->ops.lock_histogram(hw_dspp, NULL);
+	}
+
+	/* notify histogram event */
+	sde_crtc_event_queue(crtc_drm, sde_cp_notify_hist_event, NULL);
+}
+
+static void sde_cp_notify_hist_event(struct drm_crtc *crtc_drm, void *arg)
+{
+	struct sde_hw_dspp *hw_dspp = NULL;
+	struct sde_crtc *crtc;
+	struct drm_event event;
+	struct drm_msm_hist *hist_data;
+	struct drm_msm_hist tmp_hist_data;
+	u32 i, j;
+
+	if (!crtc_drm) {
+		DRM_ERROR("invalid crtc %pK\n", crtc_drm);
+		return;
+	}
+
+	crtc = to_sde_crtc(crtc_drm);
+	if (!crtc) {
+		DRM_ERROR("invalid sde_crtc %pK\n", crtc);
+		return;
+	}
+
+	if (!crtc->hist_blob)
+		return;
+
+	/* read histogram data into blob */
+	hist_data = (struct drm_msm_hist *)crtc->hist_blob->data;
+	for (i = 0; i < crtc->num_mixers; i++) {
+		hw_dspp = crtc->mixers[i].hw_dspp;
+		if (!hw_dspp || !hw_dspp->ops.read_histogram) {
+			DRM_ERROR("invalid dspp %pK or read_histogram func\n",
+				hw_dspp);
+			return;
+		}
+		if (!i) {
+			hw_dspp->ops.read_histogram(hw_dspp, hist_data);
+		} else {
+			/* Merge hist data for DSPP0 and DSPP1 */
+			hw_dspp->ops.read_histogram(hw_dspp, &tmp_hist_data);
+			for (j = 0; j < HIST_V_SIZE; j++)
+				hist_data->data[j] += tmp_hist_data.data[j];
+		}
+	}
+
+	/* send histogram event with blob id */
+	event.length = sizeof(u32);
+	event.type = DRM_EVENT_HISTOGRAM;
+	msm_mode_object_event_notify(&crtc_drm->base, crtc_drm->dev,
+			&event, (u8 *)(&crtc->hist_blob->base.id));
+}
+
+int sde_cp_hist_interrupt(struct drm_crtc *crtc_drm, bool en,
+	struct sde_irq_callback *hist_irq)
+{
+	struct sde_kms *kms = NULL;
+	u32 num_mixers;
+	struct sde_hw_mixer *hw_lm;
+	struct sde_hw_dspp *hw_dspp = NULL;
+	struct sde_crtc *crtc;
+	struct sde_crtc_irq_info *node = NULL;
+	int i, irq_idx, ret = 0;
+
+	if (!crtc_drm || !hist_irq) {
+		DRM_ERROR("invalid crtc %pK irq %pK\n", crtc_drm, hist_irq);
+		return -EINVAL;
+	}
+
+	crtc = to_sde_crtc(crtc_drm);
+	if (!crtc) {
+		DRM_ERROR("invalid sde_crtc %pK\n", crtc);
+		return -EINVAL;
+	}
+
+	kms = get_kms(crtc_drm);
+	num_mixers = crtc->num_mixers;
+
+	for (i = 0; i < num_mixers; i++) {
+		hw_lm = crtc->mixers[i].hw_lm;
+		hw_dspp = crtc->mixers[i].hw_dspp;
+		if (!hw_lm->cfg.right_mixer)
+			break;
+	}
+
+	if (!hw_dspp) {
+		DRM_ERROR("invalid dspp\n");
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	irq_idx = sde_core_irq_idx_lookup(kms, SDE_IRQ_TYPE_HIST_DSPP_DONE,
+			hw_dspp->idx);
+	if (irq_idx < 0) {
+		DRM_ERROR("failed to get the irq idx ret %d\n", irq_idx);
+		ret = irq_idx;
+		goto exit;
+	}
+
+	node = _sde_cp_get_intr_node(DRM_EVENT_HISTOGRAM, crtc);
+
+	/* deregister histogram irq */
+	if (!en) {
+		if (node) {
+			/* device suspend case or suspend to IPC cases */
+			if (node->state == IRQ_ENABLED) {
+				ret = sde_core_irq_disable(kms, &irq_idx, 1);
+				if (ret)
+					DRM_ERROR("disable irq %d error %d\n",
+						irq_idx, ret);
+				else
+					node->state = IRQ_NOINIT;
+			} else {
+				node->state = IRQ_NOINIT;
+			}
+		} else {
+			DRM_ERROR("failed to get node from crtc event list\n");
+		}
+
+		sde_core_irq_unregister_callback(kms, irq_idx, hist_irq);
+		goto exit;
+	}
+
+	/* register histogram irq */
+	hist_irq->arg = crtc;
+	hist_irq->func = sde_cp_hist_interrupt_cb;
+	ret = sde_core_irq_register_callback(kms, irq_idx, hist_irq);
+	if (ret) {
+		DRM_ERROR("failed to register the callback ret %d\n", ret);
+		goto exit;
+	}
+
+	if (node) {
+		/* device resume or resume from IPC cases */
+		if (node->state == IRQ_DISABLED || node->state == IRQ_NOINIT) {
+			ret = sde_core_irq_enable(kms, &irq_idx, 1);
+			if (ret) {
+				DRM_ERROR("enable irq %d error %d\n",
+					irq_idx, ret);
+				sde_core_irq_unregister_callback(kms,
+					irq_idx, hist_irq);
+			} else {
+				node->state = IRQ_ENABLED;
+			}
+		}
+	} else {
+		/* request from userspace to register the event
+		 * in this case, node has not been added into the event list
+		 */
+		ret = sde_core_irq_enable(kms, &irq_idx, 1);
+		if (ret) {
+			DRM_ERROR("failed to enable irq ret %d\n", ret);
+			sde_core_irq_unregister_callback(kms,
+				irq_idx, hist_irq);
+		}
+	}
+exit:
+	return ret;
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_color_processing.h b/drivers/gpu/drm/msm/sde/sde_color_processing.h
index 08e345d..aff07ef 100644
--- a/drivers/gpu/drm/msm/sde/sde_color_processing.h
+++ b/drivers/gpu/drm/msm/sde/sde_color_processing.h
@@ -29,6 +29,25 @@ enum sde_memcolor_type {
 	MEMCOLOR_FOLIAGE
 };
 
+/*
+ * PA HISTOGRAM modes
+ * @HIST_DISABLED          Histogram disabled
+ * @HIST_ENABLED           Histogram enabled
+ */
+enum sde_hist_modes {
+	HIST_DISABLED,
+	HIST_ENABLED
+};
+
+/**
+ * struct drm_prop_enum_list - drm structure for creating enum property and
+ *                             enumerating values
+ */
+static const struct drm_prop_enum_list sde_hist_modes[] = {
+	{HIST_DISABLED, "hist_off"},
+	{HIST_ENABLED, "hist_on"},
+};
+
 /**
  * sde_cp_crtc_init(): Initialize color processing lists for a crtc.
  *                     Should be called during crtc initialization.
@@ -117,4 +136,13 @@ void sde_cp_crtc_pre_ipc(struct drm_crtc *crtc);
  * @crtc: Pointer to crtc.
  */
 void sde_cp_crtc_post_ipc(struct drm_crtc *crtc);
+
+/**
+ * sde_cp_hist_interrupt: Api to enable/disable histogram interrupt
+ * @crtc: Pointer to crtc.
+ * @en: Variable to enable/disable interrupt.
+ * @irq: Pointer to irq callback
+ */
+int sde_cp_hist_interrupt(struct drm_crtc *crtc_drm, bool en,
+	struct sde_irq_callback *hist_irq);
 #endif /*_SDE_COLOR_PROCESSING_H */
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c
index 4b7ec6a..23ca2e9 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.c
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.c
@@ -48,14 +48,6 @@
 #define MEM_PROTECT_SD_CTRL_SWITCH 0x18
 #define MDP_DEVICE_ID            0x1A
 
-struct sde_crtc_irq_info {
-	struct sde_irq_callback irq;
-	u32 event;
-	int (*func)(struct drm_crtc *crtc, bool en,
-			struct sde_irq_callback *irq);
-	struct list_head list;
-};
-
 struct sde_crtc_custom_events {
 	u32 event;
 	int (*func)(struct drm_crtc *crtc, bool en,
@@ -70,7 +62,8 @@ static int sde_crtc_idle_interrupt_handler(struct drm_crtc *crtc_drm,
 static struct sde_crtc_custom_events custom_events[] = {
 	{DRM_EVENT_AD_BACKLIGHT, sde_cp_ad_interrupt},
 	{DRM_EVENT_CRTC_POWER, sde_crtc_power_interrupt_handler},
-	{DRM_EVENT_IDLE_NOTIFY, sde_crtc_idle_interrupt_handler}
+	{DRM_EVENT_IDLE_NOTIFY, sde_crtc_idle_interrupt_handler},
+	{DRM_EVENT_HISTOGRAM, sde_cp_hist_interrupt},
 };
 
 /* default input fence timeout, in ms */
@@ -3229,7 +3222,7 @@ static void sde_crtc_destroy_state(struct drm_crtc *crtc,
 static int _sde_crtc_wait_for_frame_done(struct drm_crtc *crtc)
 {
 	struct sde_crtc *sde_crtc;
-	int ret, rc = 0;
+	int ret, rc = 0, i;
 
 	if (!crtc) {
 		SDE_ERROR("invalid argument\n");
@@ -3242,7 +3235,17 @@ static int _sde_crtc_wait_for_frame_done(struct drm_crtc *crtc)
 		return 0;
 	}
 
-	SDE_EVT32_VERBOSE(DRMID(crtc), SDE_EVTLOG_FUNC_ENTRY);
+	SDE_EVT32(DRMID(crtc), SDE_EVTLOG_FUNC_ENTRY);
+
+	/*
+	 * flush all the event thread work to make sure all the
+	 * FRAME_EVENTS from encoder are propagated to crtc
+	 */
+	for (i = 0; i < ARRAY_SIZE(sde_crtc->frame_events); i++) {
+		if (list_empty(&sde_crtc->frame_events[i].list))
+			kthread_flush_work(&sde_crtc->frame_events[i].work);
+	}
+
 	ret = wait_for_completion_timeout(&sde_crtc->frame_done_comp,
 			msecs_to_jiffies(SDE_FRAME_DONE_TIMEOUT));
 	if (!ret) {
@@ -5471,6 +5474,8 @@ static int _sde_crtc_event_enable(struct sde_kms *kms,
 
 	if (!ret) {
 		spin_lock_irqsave(&crtc->spin_lock, flags);
+		/* irq is registered and enabled; record the state */
+		node->state = IRQ_ENABLED;
 		list_add_tail(&node->list, &crtc->user_event_list);
 		spin_unlock_irqrestore(&crtc->spin_lock, flags);
 	} else {
@@ -5494,7 +5499,6 @@ static int _sde_crtc_event_disable(struct sde_kms *kms,
 	spin_lock_irqsave(&crtc->spin_lock, flags);
 	list_for_each_entry(node, &crtc->user_event_list, list) {
 		if (node->event == event) {
-			list_del(&node->list);
 			found = true;
 			break;
 		}
@@ -5510,12 +5514,15 @@ static int _sde_crtc_event_disable(struct sde_kms *kms,
 	 * no need to disable/de-register.
 	 */
 	if (!crtc_drm->enabled) {
+		list_del(&node->list);
 		kfree(node);
 		return 0;
 	}
 	priv = kms->dev->dev_private;
 	sde_power_resource_enable(&priv->phandle, kms->core_client, true);
 	ret = node->func(crtc_drm, false, &node->irq);
+	list_del(&node->list);
+	kfree(node);
 	sde_power_resource_enable(&priv->phandle, kms->core_client, false);
 	return ret;
 }
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.h b/drivers/gpu/drm/msm/sde/sde_crtc.h
index c459c62..59bfc47 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.h
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.h
@@ -256,6 +256,7 @@ struct sde_crtc {
 	struct list_head user_event_list;
 
 	struct mutex crtc_lock;
+	struct mutex crtc_cp_lock;
 
 	atomic_t frame_pending;
 	struct sde_crtc_frame_event frame_events[SDE_CRTC_FRAME_EVENT_SIZE];
@@ -284,6 +285,9 @@ struct sde_crtc {
 	struct list_head rp_head;
 
 	struct sde_crtc_smmu_state_data smmu_state;
+
+	/* blob for histogram data */
+	struct drm_property_blob *hist_blob;
 };
 
 #define to_sde_crtc(x) container_of(x, struct sde_crtc, base)
@@ -406,6 +410,29 @@ struct sde_crtc_state {
 	struct sde_crtc_respool rp;
 };
 
+enum sde_crtc_irq_state {
+	IRQ_NOINIT,
+	IRQ_ENABLED,
+	IRQ_DISABLED,
+};
+
+/**
+ * sde_crtc_irq_info - crtc interrupt info
+ * @irq: interrupt callback
+ * @event: event type of the interrupt
+ * @func: function pointer to enable/disable the interrupt
+ * @list: list of user customized event in crtc
+ * @ref_count: reference count for the interrupt
+ */
+struct sde_crtc_irq_info {
+	struct sde_irq_callback irq;
+	u32 event;
+	int (*func)(struct drm_crtc *crtc, bool en,
+			struct sde_irq_callback *irq);
+	struct list_head list;
+	enum sde_crtc_irq_state state;
+};
+
 #define to_sde_crtc_state(x) \
 	container_of(x, struct sde_crtc_state, base)
 
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_cdm.c b/drivers/gpu/drm/msm/sde/sde_hw_cdm.c
index 304106d..7a391ae 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_cdm.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_cdm.c
@@ -81,6 +81,7 @@ static struct sde_cdm_cfg *_cdm_offset(enum sde_cdm cdm,
 		if (cdm == m->cdm[i].id) {
 			b->base_off = addr;
 			b->blk_off = m->cdm[i].base;
+			b->length = m->cdm[i].len;
 			b->hwversion = m->hwversion;
 			b->log_mask = SDE_DBG_MASK_CDM;
 			return &m->cdm[i];
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_color_processing_v1_7.c b/drivers/gpu/drm/msm/sde/sde_hw_color_processing_v1_7.c
index c8e732a..8e54a2a 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_color_processing_v1_7.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_color_processing_v1_7.c
@@ -24,6 +24,9 @@
 #define PA_VAL_DSPP_OFF		0x240
 #define PA_CONT_DSPP_OFF	0x244
 
+#define PA_HIST_CTRL_DSPP_OFF	0x4
+#define PA_HIST_DATA_DSPP_OFF	0x400
+
 #define PA_LUTV_DSPP_OFF	0x1400
 #define PA_LUT_SWAP_OFF		0x234
 
@@ -70,6 +73,7 @@
 #define DSPP_OP_PA_CONT_EN	BIT(28)
 #define DSPP_OP_PA_EN		BIT(20)
 #define DSPP_OP_PA_LUTV_EN	BIT(19)
+#define DSPP_OP_PA_HIST_EN	BIT(16)
 #define DSPP_OP_PA_SKIN_EN	BIT(5)
 #define DSPP_OP_PA_FOL_EN	BIT(6)
 #define DSPP_OP_PA_SKY_EN	BIT(7)
@@ -563,3 +567,69 @@ void sde_setup_dspp_gc_v1_7(struct sde_hw_dspp *ctx, void *cfg)
 	i = BIT(0) | ((payload->flags & PGC_8B_ROUND) ? BIT(1) : 0);
 	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->gc.base, i);
 }
+
+void sde_setup_dspp_hist_v1_7(struct sde_hw_dspp *ctx, void *cfg)
+{
+	u32 base, offset;
+	u32 op_mode;
+	bool feature_enabled;
+
+	if (!ctx || !cfg) {
+		DRM_ERROR("invalid parameters ctx %pK cfg %pK", ctx, cfg);
+		return;
+	}
+
+	feature_enabled = *(bool *)cfg;
+	base = ctx->cap->sblk->hist.base;
+	offset = base + PA_HIST_CTRL_DSPP_OFF;
+
+	op_mode = SDE_REG_READ(&ctx->hw, base);
+	if (!feature_enabled) {
+		op_mode &= ~DSPP_OP_PA_HIST_EN;
+		if (PA_DSPP_DISABLE_REQUIRED(op_mode))
+			op_mode &= ~DSPP_OP_PA_EN;
+	} else {
+		op_mode |= DSPP_OP_PA_HIST_EN | DSPP_OP_PA_EN;
+	}
+
+	SDE_REG_WRITE(&ctx->hw, offset, 0);
+	SDE_REG_WRITE(&ctx->hw, base, op_mode);
+}
+
+void sde_read_dspp_hist_v1_7(struct sde_hw_dspp *ctx, void *cfg)
+{
+	struct drm_msm_hist *hist_data;
+	u32 offset, offset_ctl;
+	u32 i;
+
+	if (!ctx || !cfg) {
+		DRM_ERROR("invalid parameters ctx %pK cfg %pK", ctx, cfg);
+		return;
+	}
+
+	hist_data = (struct drm_msm_hist *)cfg;
+	offset = ctx->cap->sblk->hist.base + PA_HIST_DATA_DSPP_OFF;
+	offset_ctl = ctx->cap->sblk->hist.base + PA_HIST_CTRL_DSPP_OFF;
+
+	for (i = 0; i < HIST_V_SIZE; i++)
+		hist_data->data[i] = SDE_REG_READ(&ctx->hw, offset + i * 4) &
+					REG_MASK(24);
+
+	/* unlock hist buffer */
+	SDE_REG_WRITE(&ctx->hw, offset_ctl, 0);
+}
+
+void sde_lock_dspp_hist_v1_7(struct sde_hw_dspp *ctx, void *cfg)
+{
+	u32 offset_ctl;
+
+	if (!ctx) {
+		DRM_ERROR("invalid parameters ctx %pK", ctx);
+		return;
+	}
+
+	offset_ctl = ctx->cap->sblk->hist.base + PA_HIST_CTRL_DSPP_OFF;
+
+	/* lock hist buffer */
+	SDE_REG_WRITE(&ctx->hw, offset_ctl, 1);
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_color_processing_v1_7.h b/drivers/gpu/drm/msm/sde/sde_hw_color_processing_v1_7.h
index 4cd2e5a..74018a3 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_color_processing_v1_7.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_color_processing_v1_7.h
@@ -89,4 +89,23 @@ void sde_setup_dspp_pa_vlut_v1_8(struct sde_hw_dspp *ctx, void *cfg);
  */
 void sde_setup_dspp_gc_v1_7(struct sde_hw_dspp *ctx, void *cfg);
 
+/**
+ * sde_setup_dspp_hist_v1_7 - setup DSPP histogram feature in v1.7 hardware
+ * @ctx: Pointer to DSPP context
+ * @cfg: Pointer to histogram control data
+ */
+void sde_setup_dspp_hist_v1_7(struct sde_hw_dspp *ctx, void *cfg);
+
+/**
+ * sde_read_dspp_hist_v1_7 - read DSPP histogram data in v1.7 hardware
+ * @ctx: Pointer to DSPP context
+ * @cfg: Pointer to histogram data
+ */
+void sde_read_dspp_hist_v1_7(struct sde_hw_dspp *ctx, void *cfg);
+
+/**
+ * sde_lock_dspp_hist_v1_7 - lock DSPP histogram buffer in v1.7 hardware
+ * @ctx: Pointer to DSPP context
+ */
+void sde_lock_dspp_hist_v1_7(struct sde_hw_dspp *ctx, void *cfg);
 #endif
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_dspp.c b/drivers/gpu/drm/msm/sde/sde_hw_dspp.c
index d30c0ae..36e30b7 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_dspp.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_dspp.c
@@ -90,6 +90,17 @@ static void _setup_dspp_ops(struct sde_hw_dspp *c, unsigned long features)
 					sde_setup_dspp_pa_vlut_v1_8;
 			}
 			break;
+		case SDE_DSPP_HIST:
+			if (c->cap->sblk->hist.version ==
+				(SDE_COLOR_PROCESS_VER(0x1, 0x7))) {
+				c->ops.setup_histogram =
+				    sde_setup_dspp_hist_v1_7;
+				c->ops.read_histogram =
+				    sde_read_dspp_hist_v1_7;
+				c->ops.lock_histogram =
+				    sde_lock_dspp_hist_v1_7;
+			}
+			break;
 		case SDE_DSPP_GAMUT:
 			if (c->cap->sblk->gamut.version ==
 					SDE_COLOR_PROCESS_VER(0x4, 0)) {
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_dspp.h b/drivers/gpu/drm/msm/sde/sde_hw_dspp.h
index 44b3831..4878fc6 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_dspp.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_dspp.h
@@ -38,6 +38,13 @@ struct sde_hw_dspp_ops {
 	void (*read_histogram)(struct sde_hw_dspp *ctx, void *cfg);
 
 	/**
+	 * lock_histogram - lock dspp histogram buffer
+	 * @ctx: Pointer to dspp context
+	 * @cfg: Pointer to configuration
+	 */
+	void (*lock_histogram)(struct sde_hw_dspp *ctx, void *cfg);
+
+	/**
 	 * setup_igc - update dspp igc
 	 * @ctx: Pointer to dspp context
 	 * @cfg: Pointer to configuration
diff --git a/drivers/gpu/drm/msm/sde/sde_reg_dma.c b/drivers/gpu/drm/msm/sde/sde_reg_dma.c
index e38524f..a52abd9 100644
--- a/drivers/gpu/drm/msm/sde/sde_reg_dma.c
+++ b/drivers/gpu/drm/msm/sde/sde_reg_dma.c
@@ -99,9 +99,6 @@ int sde_reg_dma_init(void __iomem *addr, struct sde_mdss_cfg *m,
 		rc = init_v1(&reg_dma);
 		if (rc)
 			DRM_DEBUG("init v1 dma ops failed\n");
-		else
-			sde_dbg_reg_register_base("reg_dma", addr,
-					reg_dma.caps->len);
 		break;
 	default:
 		break;
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index 7943745..8e5683f 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -554,7 +554,13 @@ static int _soft_reset(struct adreno_device *adreno_dev)
 	return 0;
 }
 
-
+/**
+ * adreno_irqctrl() - Enables/disables the RBBM interrupt mask
+ * @adreno_dev: Pointer to an adreno_device
+ * @state: 1 for masked or 0 for unmasked
+ * Power: The caller of this function must make sure to use OOBs
+ * so that we know that the GPU is powered on
+ */
 void adreno_irqctrl(struct adreno_device *adreno_dev, int state)
 {
 	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
@@ -599,7 +605,7 @@ static irqreturn_t adreno_irq_handler(struct kgsl_device *device)
 	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
 	struct adreno_irq *irq_params = gpudev->irq;
 	irqreturn_t ret = IRQ_NONE;
-	unsigned int status = 0, tmp, int_bit;
+	unsigned int status = 0, fence = 0, tmp, int_bit;
 	int i;
 
 	atomic_inc(&adreno_dev->pending_irq_refcnt);
@@ -614,6 +620,17 @@ static irqreturn_t adreno_irq_handler(struct kgsl_device *device)
 	if (gpudev->gpu_keepalive)
 		gpudev->gpu_keepalive(adreno_dev, true);
 
+	/*
+	 * If the AHB fence is not in ALLOW mode when we receive an RBBM
+	 * interrupt, something went wrong. Set a fault and change the
+	 * fence to ALLOW so we can clear the interrupt.
+	 */
+	adreno_readreg(adreno_dev, ADRENO_REG_GMU_AO_AHB_FENCE_CTRL, &fence);
+	if (fence != 0) {
+		KGSL_DRV_CRIT_RATELIMIT(device, "AHB fence is stuck in ISR\n");
+		return ret;
+	}
+
 	adreno_readreg(adreno_dev, ADRENO_REG_RBBM_INT_0_STATUS, &status);
 
 	/*
@@ -1498,9 +1515,9 @@ static int _adreno_start(struct adreno_device *adreno_dev)
 
 	/* Send OOB request to turn on the GX */
 	if (gpudev->oob_set) {
-		status = gpudev->oob_set(adreno_dev, OOB_GPUSTART_SET_MASK,
-				OOB_GPUSTART_CHECK_MASK,
-				OOB_GPUSTART_CLEAR_MASK);
+		status = gpudev->oob_set(adreno_dev, OOB_GPU_SET_MASK,
+				OOB_GPU_CHECK_MASK,
+				OOB_GPU_CLEAR_MASK);
 		if (status)
 			goto error_mmu_off;
 	}
@@ -1599,17 +1616,28 @@ static int _adreno_start(struct adreno_device *adreno_dev)
 				pmqos_active_vote);
 
 	/* Send OOB request to allow IFPC */
-	if (gpudev->oob_clear)
-		gpudev->oob_clear(adreno_dev, OOB_GPUSTART_CLEAR_MASK);
+	if (gpudev->oob_clear) {
+		gpudev->oob_clear(adreno_dev, OOB_GPU_CLEAR_MASK);
+
+		/* If we made it this far, the BOOT OOB was sent to the GMU */
+		if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG))
+			gpudev->oob_clear(adreno_dev,
+					OOB_BOOT_SLUMBER_CLEAR_MASK);
+	}
 
 	return 0;
 
 error_oob_clear:
 	if (gpudev->oob_clear)
-		gpudev->oob_clear(adreno_dev, OOB_GPUSTART_CLEAR_MASK);
+		gpudev->oob_clear(adreno_dev, OOB_GPU_CLEAR_MASK);
 
 error_mmu_off:
 	kgsl_mmu_stop(&device->mmu);
+	if (gpudev->oob_clear &&
+			ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG)) {
+		gpudev->oob_clear(adreno_dev,
+				OOB_BOOT_SLUMBER_CLEAR_MASK);
+	}
 
 error_pwr_off:
 	/* set the state back to original state */
@@ -1667,11 +1695,22 @@ static void adreno_set_active_ctxs_null(struct adreno_device *adreno_dev)
 static int adreno_stop(struct kgsl_device *device)
 {
 	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
+	int error = 0;
 
 	if (!test_bit(ADRENO_DEVICE_STARTED, &adreno_dev->priv))
 		return 0;
 
-	adreno_set_active_ctxs_null(adreno_dev);
+	/* Turn the power on one last time before stopping */
+	if (gpudev->oob_set) {
+		error = gpudev->oob_set(adreno_dev, OOB_GPU_SET_MASK,
+				OOB_GPU_CHECK_MASK,
+				OOB_GPU_CLEAR_MASK);
+		if (error) {
+			gpudev->oob_clear(adreno_dev, OOB_GPU_CLEAR_MASK);
+			return error;
+		}
+	}
 
 	adreno_dispatcher_stop(adreno_dev);
 
@@ -1694,10 +1733,29 @@ static int adreno_stop(struct kgsl_device *device)
 	/* Save physical performance counter values before GPU power down*/
 	adreno_perfcounter_save(adreno_dev);
 
+	if (gpudev->oob_clear)
+		gpudev->oob_clear(adreno_dev, OOB_GPU_CLEAR_MASK);
+
+	/*
+	 * Saving perfcounters will use an OOB to put the GMU into
+	 * active state. Before continuing, we should wait for the
+	 * GMU to return to the lowest idle level. This is
+	 * because some idle level transitions require VBIF and MMU.
+	 */
+	if (gpudev->wait_for_lowest_idle &&
+			gpudev->wait_for_lowest_idle(adreno_dev))
+		return -EINVAL;
+
 	adreno_vbif_clear_pending_transactions(device);
 
 	kgsl_mmu_stop(&device->mmu);
 
+	/*
+	 * At this point, MMU is turned off so we can safely
+	 * destroy any pending contexts and their pagetables
+	 */
+	adreno_set_active_ctxs_null(adreno_dev);
+
 	clear_bit(ADRENO_DEVICE_STARTED, &adreno_dev->priv);
 
 	return 0;
@@ -2323,9 +2381,9 @@ int adreno_soft_reset(struct kgsl_device *device)
 	int ret;
 
 	if (gpudev->oob_set) {
-		ret = gpudev->oob_set(adreno_dev, OOB_CPINIT_SET_MASK,
-				OOB_CPINIT_CHECK_MASK,
-				OOB_CPINIT_CLEAR_MASK);
+		ret = gpudev->oob_set(adreno_dev, OOB_GPU_SET_MASK,
+				OOB_GPU_CHECK_MASK,
+				OOB_GPU_CLEAR_MASK);
 		if (ret)
 			return ret;
 	}
@@ -2349,7 +2407,7 @@ int adreno_soft_reset(struct kgsl_device *device)
 		ret = _soft_reset(adreno_dev);
 	if (ret) {
 		if (gpudev->oob_clear)
-			gpudev->oob_clear(adreno_dev, OOB_CPINIT_CLEAR_MASK);
+			gpudev->oob_clear(adreno_dev, OOB_GPU_CLEAR_MASK);
 		return ret;
 	}
 
@@ -2399,7 +2457,7 @@ int adreno_soft_reset(struct kgsl_device *device)
 	adreno_perfcounter_restore(adreno_dev);
 
 	if (gpudev->oob_clear)
-		gpudev->oob_clear(adreno_dev, OOB_CPINIT_CLEAR_MASK);
+		gpudev->oob_clear(adreno_dev, OOB_GPU_CLEAR_MASK);
 
 	return ret;
 }
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index 342a85a..3118375 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -638,6 +638,7 @@ enum adreno_regs {
 	ADRENO_REG_VBIF_VERSION,
 	ADRENO_REG_GBIF_HALT,
 	ADRENO_REG_GBIF_HALT_ACK,
+	ADRENO_REG_GMU_AO_AHB_FENCE_CTRL,
 	ADRENO_REG_GMU_AO_INTERRUPT_EN,
 	ADRENO_REG_GMU_AO_HOST_INTERRUPT_CLR,
 	ADRENO_REG_GMU_AO_HOST_INTERRUPT_STATUS,
@@ -886,6 +887,7 @@ struct adreno_gpudev {
 	int (*rpmh_gpu_pwrctrl)(struct adreno_device *, unsigned int ops,
 				unsigned int arg1, unsigned int arg2);
 	bool (*hw_isidle)(struct adreno_device *);
+	int (*wait_for_lowest_idle)(struct adreno_device *);
 	int (*wait_for_gmu_idle)(struct adreno_device *);
 	const char *(*iommu_fault_block)(struct adreno_device *adreno_dev,
 				unsigned int fsynr1);
@@ -893,6 +895,8 @@ struct adreno_gpudev {
 	int (*soft_reset)(struct adreno_device *);
 	bool (*gx_is_on)(struct adreno_device *);
 	bool (*sptprac_is_on)(struct adreno_device *);
+	unsigned int (*ccu_invalidate)(struct adreno_device *adreno_dev,
+				unsigned int *cmds);
 };
 
 /**
diff --git a/drivers/gpu/msm/adreno_a5xx.c b/drivers/gpu/msm/adreno_a5xx.c
index 2d078ba..434fef8 100644
--- a/drivers/gpu/msm/adreno_a5xx.c
+++ b/drivers/gpu/msm/adreno_a5xx.c
@@ -61,8 +61,8 @@ static const struct adreno_vbif_platform a5xx_vbif_platforms[] = {
 };
 
 static void a5xx_irq_storm_worker(struct work_struct *work);
-static int _read_fw2_block_header(uint32_t *header, uint32_t id,
-	uint32_t major, uint32_t minor);
+static int _read_fw2_block_header(uint32_t *header, uint32_t remain,
+	uint32_t id, uint32_t major, uint32_t minor);
 static void a5xx_gpmu_reset(struct work_struct *work);
 static int a5xx_gpmu_init(struct adreno_device *adreno_dev);
 
@@ -678,6 +678,7 @@ static int _load_gpmu_firmware(struct adreno_device *adreno_dev)
 	if (data[1] != GPMU_FIRMWARE_ID)
 		goto err;
 	ret = _read_fw2_block_header(&data[2],
+		data[0] - 2,
 		GPMU_FIRMWARE_ID,
 		adreno_dev->gpucore->gpmu_major,
 		adreno_dev->gpucore->gpmu_minor);
@@ -1200,8 +1201,8 @@ void a5xx_hwcg_set(struct adreno_device *adreno_dev, bool on)
 	kgsl_regwrite(device, A5XX_RBBM_ISDB_CNT, on ? 0x00000182 : 0x00000180);
 }
 
-static int _read_fw2_block_header(uint32_t *header, uint32_t id,
-				uint32_t major, uint32_t minor)
+static int _read_fw2_block_header(uint32_t *header, uint32_t remain,
+			uint32_t id, uint32_t major, uint32_t minor)
 {
 	uint32_t header_size;
 	int i = 1;
@@ -1211,7 +1212,8 @@ static int _read_fw2_block_header(uint32_t *header, uint32_t id,
 
 	header_size = header[0];
 	/* Headers have limited size and always occur as pairs of words */
-	if (header_size > MAX_HEADER_SIZE || header_size % 2)
+	if (header_size >  MAX_HEADER_SIZE || header_size >= remain ||
+				header_size % 2 || header_size == 0)
 		return -EINVAL;
 	/* Sequences must have an identifying id first thing in their header */
 	if (id == GPMU_SEQUENCE_ID) {
@@ -1306,6 +1308,7 @@ static void _load_regfile(struct adreno_device *adreno_dev)
 		/* For now ignore blocks other than the LM sequence */
 		if (block[4] == LM_SEQUENCE_ID) {
 			ret = _read_fw2_block_header(&block[2],
+				block_size - 2,
 				GPMU_SEQUENCE_ID,
 				adreno_dev->gpucore->lm_major,
 				adreno_dev->gpucore->lm_minor);
diff --git a/drivers/gpu/msm/adreno_a6xx.c b/drivers/gpu/msm/adreno_a6xx.c
index 301a4f8..1964517 100644
--- a/drivers/gpu/msm/adreno_a6xx.c
+++ b/drivers/gpu/msm/adreno_a6xx.c
@@ -265,7 +265,7 @@ static void a6xx_pwrup_reglist_init(struct adreno_device *adreno_dev)
 	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
 
 	if (kgsl_allocate_global(device, &adreno_dev->pwrup_reglist,
-		PAGE_SIZE, 0, KGSL_MEMDESC_PRIVILEGED,
+		PAGE_SIZE, KGSL_MEMFLAGS_GPUREADONLY, 0,
 		"powerup_register_list")) {
 		adreno_dev->pwrup_reglist.gpuaddr = 0;
 		return;
@@ -699,7 +699,7 @@ static void _set_ordinals(struct adreno_device *adreno_dev,
 		*cmds++ = lower_32_bits(gpuaddr);
 		*cmds++ = upper_32_bits(gpuaddr);
 		/* Size is in dwords */
-		*cmds++ = sizeof(a6xx_pwrup_reglist) >> 2;
+		*cmds++ = 0;
 	}
 
 	/* Pad rest of the cmds with 0's */
@@ -1014,7 +1014,6 @@ static int timed_poll_check(struct kgsl_device *device,
 			return 0;
 		/* Wait 100us to reduce unnecessary AHB bus traffic */
 		udelay(100);
-		cond_resched();
 	} while (!time_after(jiffies, t));
 
 	/* Double check one last time */
@@ -1026,6 +1025,15 @@ static int timed_poll_check(struct kgsl_device *device,
 }
 
 /*
+ * The lowest 16 bits of this value are the number of XO clock cycles
+ * for main hysteresis. This is the first hysteresis. Here we set it
+ * to 0x5DC cycles, or 78.1 us. The highest 16 bits of this value are
+ * the number of XO clock cycles for short hysteresis. This happens
+ * after main hysteresis. Here we set it to 0xA cycles, or 0.5 us.
+ */
+#define GMU_PWR_COL_HYST 0x000A05DC
+
+/*
  * a6xx_gmu_power_config() - Configure and enable GMU's low power mode
  * setting based on ADRENO feature flags.
  * @device: Pointer to KGSL device
@@ -1056,13 +1064,13 @@ static void a6xx_gmu_power_config(struct kgsl_device *device)
 		/* fall through */
 	case GPU_HW_IFPC:
 		kgsl_gmu_regwrite(device, A6XX_GMU_PWR_COL_INTER_FRAME_HYST,
-				0x000A0080);
+				GMU_PWR_COL_HYST);
 		kgsl_gmu_regrmw(device, A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0,
 				IFPC_ENABLE_MASK);
 		/* fall through */
 	case GPU_HW_SPTP_PC:
 		kgsl_gmu_regwrite(device, A6XX_GMU_PWR_COL_SPTPRAC_HYST,
-				0x000A0080);
+				GMU_PWR_COL_HYST);
 		kgsl_gmu_regrmw(device, A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0,
 				SPTP_ENABLE_MASK);
 		/* fall through */
@@ -1281,21 +1289,12 @@ static bool a6xx_gx_is_on(struct adreno_device *adreno_dev)
 {
 	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
 	unsigned int val;
-	bool state;
 
 	if (!kgsl_gmu_isenabled(device))
 		return true;
 
 	kgsl_gmu_regread(device, A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, &val);
-	state = !(val & (GX_GDSC_POWER_OFF | GX_CLK_OFF));
-
-	/* If GMU is holding on to the fence then we cannot dump any GX stuff */
-	kgsl_gmu_regread(device, A6XX_GMU_AO_AHB_FENCE_CTRL, &val);
-	if (val)
-		return false;
-
-	return state;
-
+	return !(val & (GX_GDSC_POWER_OFF | GX_CLK_OFF));
 }
 
 /*
@@ -1366,12 +1365,13 @@ static int a6xx_notify_slumber(struct kgsl_device *device)
 	/* Disable the power counter so that the GMU is not busy */
 	kgsl_gmu_regwrite(device, A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 0);
 
-	/* Turn off SPTPRAC before GMU turns off GX */
-	a6xx_sptprac_disable(adreno_dev);
+	/* Turn off SPTPRAC if we own it */
+	if (gmu->idle_level < GPU_HW_SPTP_PC)
+		a6xx_sptprac_disable(adreno_dev);
 
 	if (!ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG)) {
 		ret = hfi_notify_slumber(gmu, perf_idx, bus_level);
-		return ret;
+		goto out;
 	}
 
 	kgsl_gmu_regwrite(device, A6XX_GMU_BOOT_SLUMBER_OPTION,
@@ -1397,6 +1397,9 @@ static int a6xx_notify_slumber(struct kgsl_device *device)
 		}
 	}
 
+out:
+	/* Make sure the fence is in ALLOW mode */
+	kgsl_gmu_regwrite(device, A6XX_GMU_AO_AHB_FENCE_CTRL, 0);
 	return ret;
 }
 
@@ -1407,7 +1410,9 @@ static int a6xx_rpmh_power_on_gpu(struct kgsl_device *device)
 	int val;
 
 	kgsl_gmu_regread(device, A6XX_GPU_CC_GX_DOMAIN_MISC, &val);
-	WARN_ON(!(val & 0x1));
+	if (!(val & 0x1))
+		dev_err_ratelimited(&gmu->pdev->dev,
+			"GMEM CLAMP IO not set while GFX rail off\n");
 
 	/* RSC wake sequence */
 	kgsl_gmu_regwrite(device, A6XX_GMU_RSCC_CONTROL_REQ, BIT(1));
@@ -1564,6 +1569,8 @@ static void isense_cold_trimm(struct kgsl_device *device)
 	kgsl_gmu_regwrite(device, A6XX_GPU_CS_AMP_CALIBRATION_DONE, 1);
 
 }
+
+#define GPU_LIMIT_THRESHOLD_ENABLE	BIT(31)
 /*
  * a6xx_gmu_fw_start() - set up GMU and start FW
  * @device: Pointer to KGSL device
@@ -1647,7 +1654,7 @@ static int a6xx_gmu_fw_start(struct kgsl_device *device,
 	if (ADRENO_FEATURE(adreno_dev, ADRENO_LM) &&
 		test_bit(ADRENO_LM_CTRL, &adreno_dev->pwrctrl_flag)) {
 		kgsl_gmu_regwrite(device, A6XX_GPU_GMU_CX_GMU_PWR_THRESHOLD,
-			lm_limit(adreno_dev));
+			GPU_LIMIT_THRESHOLD_ENABLE | lm_limit(adreno_dev));
 		isense_cold_trimm(device);
 	}
 
@@ -1744,6 +1751,52 @@ static bool a6xx_hw_isidle(struct adreno_device *adreno_dev)
 	return true;
 }
 
+static int a6xx_wait_for_lowest_idle(struct adreno_device *adreno_dev)
+{
+	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+	struct gmu_device *gmu = &device->gmu;
+	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
+	unsigned int reg;
+	unsigned long t;
+
+	if (!kgsl_gmu_isenabled(device))
+		return 0;
+
+	t = jiffies + msecs_to_jiffies(GMU_IDLE_TIMEOUT);
+	while (!time_after(jiffies, t)) {
+		adreno_read_gmureg(ADRENO_DEVICE(device),
+				ADRENO_REG_GMU_RPMH_POWER_STATE, &reg);
+
+		/* SPTPRAC PC has the same idle level as IFPC */
+		if ((reg == gmu->idle_level) ||
+				(gmu->idle_level == GPU_HW_SPTP_PC &&
+				reg == GPU_HW_IFPC)) {
+			/* IFPC is not complete until GX is off */
+			if (gmu->idle_level != GPU_HW_IFPC ||
+					!gpudev->gx_is_on(adreno_dev))
+				return 0;
+		}
+
+		/* Wait 100us to reduce unnecessary AHB bus traffic */
+		udelay(100);
+	}
+
+	/* Check one last time */
+	adreno_read_gmureg(ADRENO_DEVICE(device),
+			ADRENO_REG_GMU_RPMH_POWER_STATE, &reg);
+	if ((reg == gmu->idle_level) ||
+			(gmu->idle_level == GPU_HW_SPTP_PC &&
+			reg == GPU_HW_IFPC)) {
+		if (gmu->idle_level != GPU_HW_IFPC ||
+				!gpudev->gx_is_on(adreno_dev))
+			return 0;
+	}
+
+	dev_err(&gmu->pdev->dev,
+			"Timeout waiting for lowest idle level: %d\n", reg);
+	return -ETIMEDOUT;
+}
+
 static int a6xx_wait_for_gmu_idle(struct adreno_device *adreno_dev)
 {
 	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
@@ -2874,6 +2927,20 @@ static void a6xx_platform_setup(struct adreno_device *adreno_dev)
 }
 
 
+static unsigned int a6xx_ccu_invalidate(struct adreno_device *adreno_dev,
+	unsigned int *cmds)
+{
+	/* CCU_INVALIDATE_DEPTH */
+	*cmds++ = cp_packet(adreno_dev, CP_EVENT_WRITE, 1);
+	*cmds++ = 24;
+
+	/* CCU_INVALIDATE_COLOR */
+	*cmds++ = cp_packet(adreno_dev, CP_EVENT_WRITE, 1);
+	*cmds++ = 25;
+
+	return 4;
+}
+
 /* Register offset defines for A6XX, in order of enum adreno_regs */
 static unsigned int a6xx_register_offsets[ADRENO_REG_REGISTER_MAX] = {
 
@@ -2939,6 +3006,8 @@ static unsigned int a6xx_register_offsets[ADRENO_REG_REGISTER_MAX] = {
 				A6XX_GMU_ALWAYS_ON_COUNTER_L),
 	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_ALWAYSON_COUNTER_HI,
 				A6XX_GMU_ALWAYS_ON_COUNTER_H),
+	ADRENO_REG_DEFINE(ADRENO_REG_GMU_AO_AHB_FENCE_CTRL,
+				A6XX_GMU_AO_AHB_FENCE_CTRL),
 	ADRENO_REG_DEFINE(ADRENO_REG_GMU_AO_INTERRUPT_EN,
 				A6XX_GMU_AO_INTERRUPT_EN),
 	ADRENO_REG_DEFINE(ADRENO_REG_GMU_AO_HOST_INTERRUPT_CLR,
@@ -3019,6 +3088,7 @@ struct adreno_gpudev adreno_a6xx_gpudev = {
 	.gpu_keepalive = a6xx_gpu_keepalive,
 	.rpmh_gpu_pwrctrl = a6xx_rpmh_gpu_pwrctrl,
 	.hw_isidle = a6xx_hw_isidle, /* Replaced by NULL if GMU is disabled */
+	.wait_for_lowest_idle = a6xx_wait_for_lowest_idle,
 	.wait_for_gmu_idle = a6xx_wait_for_gmu_idle,
 	.iommu_fault_block = a6xx_iommu_fault_block,
 	.reset = a6xx_reset,
@@ -3032,4 +3102,5 @@ struct adreno_gpudev adreno_a6xx_gpudev = {
 	.preemption_context_destroy = a6xx_preemption_context_destroy,
 	.gx_is_on = a6xx_gx_is_on,
 	.sptprac_is_on = a6xx_sptprac_is_on,
+	.ccu_invalidate = a6xx_ccu_invalidate,
 };
diff --git a/drivers/gpu/msm/adreno_a6xx_snapshot.c b/drivers/gpu/msm/adreno_a6xx_snapshot.c
index 755fc7f..e865f20 100644
--- a/drivers/gpu/msm/adreno_a6xx_snapshot.c
+++ b/drivers/gpu/msm/adreno_a6xx_snapshot.c
@@ -557,6 +557,7 @@ static struct reg_list {
 	const unsigned int *regs;
 	unsigned int count;
 	const struct sel_reg *sel;
+	uint64_t offset;
 } a6xx_reg_list[] = {
 	{ a6xx_registers, ARRAY_SIZE(a6xx_registers) / 2, NULL },
 	{ a6xx_rb_rac_registers, ARRAY_SIZE(a6xx_rb_rac_registers) / 2,
@@ -589,7 +590,7 @@ static size_t a6xx_snapshot_registers(struct kgsl_device *device, u8 *buf,
 	struct kgsl_snapshot_regs *header = (struct kgsl_snapshot_regs *)buf;
 	struct reg_list *regs = (struct reg_list *)priv;
 	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
-	unsigned int *src = (unsigned int *)a6xx_crashdump_registers.hostptr;
+	unsigned int *src;
 	unsigned int j, k;
 	unsigned int count = 0;
 
@@ -602,6 +603,7 @@ static size_t a6xx_snapshot_registers(struct kgsl_device *device, u8 *buf,
 		return 0;
 	}
 
+	src = (unsigned int *)(a6xx_crashdump_registers.hostptr + regs->offset);
 	remain -= sizeof(*header);
 
 	for (j = 0; j < regs->count; j++) {
@@ -1577,6 +1579,12 @@ void a6xx_snapshot(struct adreno_device *adreno_dev,
 	bool sptprac_on;
 	unsigned int i;
 
+	/* Make sure the fence is in ALLOW mode so registers can be read */
+	kgsl_regwrite(device, A6XX_GMU_AO_AHB_FENCE_CTRL, 0);
+
+	/* GMU TCM data dumped through AHB */
+	a6xx_snapshot_gmu(adreno_dev, snapshot);
+
 	sptprac_on = gpudev->sptprac_is_on(adreno_dev);
 
 	/* Return if the GX is off */
@@ -1924,6 +1932,8 @@ void a6xx_crashdump_init(struct adreno_device *adreno_dev)
 	for (i = 0; i < ARRAY_SIZE(a6xx_reg_list); i++) {
 		struct reg_list *regs = &a6xx_reg_list[i];
 
+		regs->offset = offset;
+
 		/* Program the SEL_CNTL_CD register appropriately */
 		if (regs->sel) {
 			*ptr++ = regs->sel->val;
diff --git a/drivers/gpu/msm/adreno_perfcounter.c b/drivers/gpu/msm/adreno_perfcounter.c
index dfce3eb..9ea8069 100644
--- a/drivers/gpu/msm/adreno_perfcounter.c
+++ b/drivers/gpu/msm/adreno_perfcounter.c
@@ -174,10 +174,15 @@ inline void adreno_perfcounter_save(struct adreno_device *adreno_dev)
 	struct adreno_perfcounters *counters = ADRENO_PERFCOUNTERS(adreno_dev);
 	struct adreno_perfcount_group *group;
 	unsigned int counter, groupid;
+	int ret;
 
 	if (counters == NULL)
 		return;
 
+	ret = adreno_perfcntr_active_oob_get(adreno_dev);
+	if (ret)
+		return;
+
 	for (groupid = 0; groupid < counters->group_count; groupid++) {
 		group = &(counters->groups[groupid]);
 
@@ -197,6 +202,8 @@ inline void adreno_perfcounter_save(struct adreno_device *adreno_dev)
 								counter);
 		}
 	}
+
+	adreno_perfcntr_active_oob_put(adreno_dev);
 }
 
 static int adreno_perfcounter_enable(struct adreno_device *adreno_dev,
diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c
index 09c1ae6..d248479 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.c
+++ b/drivers/gpu/msm/adreno_ringbuffer.c
@@ -83,10 +83,12 @@ static void adreno_get_submit_time(struct adreno_device *adreno_dev,
 /*
  * Wait time before trying to write the register again.
  * Hopefully the GMU has finished waking up during this delay.
+ * This delay must be less than the IFPC main hysteresis or
+ * the GMU will start shutting down before we try again.
  */
-#define GMU_WAKEUP_DELAY 50
+#define GMU_WAKEUP_DELAY 20
 /* Max amount of tries to wake up the GMU. */
-#define GMU_WAKEUP_RETRY_MAX 20
+#define GMU_WAKEUP_RETRY_MAX 60
 
 /*
  * Check the WRITEDROPPED0 bit in the
@@ -877,6 +879,9 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
 	if (gpudev->set_marker)
 		dwords += 4;
 
+	if (gpudev->ccu_invalidate)
+		dwords += 4;
+
 	link = kcalloc(dwords, sizeof(unsigned int), GFP_KERNEL);
 	if (!link) {
 		ret = -ENOMEM;
@@ -930,6 +935,9 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
 		}
 	}
 
+	if (gpudev->ccu_invalidate)
+		cmds += gpudev->ccu_invalidate(adreno_dev, cmds);
+
 	if (gpudev->set_marker)
 		cmds += gpudev->set_marker(cmds, 0);
 
diff --git a/drivers/gpu/msm/adreno_sysfs.c b/drivers/gpu/msm/adreno_sysfs.c
index b06aa98..704e79e 100644
--- a/drivers/gpu/msm/adreno_sysfs.c
+++ b/drivers/gpu/msm/adreno_sysfs.c
@@ -165,7 +165,6 @@ static int _ft_hang_intr_status_store(struct adreno_device *adreno_dev,
 
 	if (test_bit(ADRENO_DEVICE_STARTED, &adreno_dev->priv)) {
 		kgsl_pwrctrl_change_state(device, KGSL_STATE_ACTIVE);
-		adreno_irqctrl(adreno_dev, 1);
 	} else if (device->state == KGSL_STATE_INIT) {
 		ret = -EACCES;
 		change_bit(ADRENO_DEVICE_HANG_INTR, &adreno_dev->priv);
@@ -221,6 +220,23 @@ static int _preemption_store(struct adreno_device *adreno_dev,
 	return 0;
 }
 
+static int _gmu_idle_level_store(struct adreno_device *adreno_dev,
+		unsigned int val)
+{
+	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+	struct gmu_device *gmu = &device->gmu;
+
+	mutex_lock(&device->mutex);
+
+	/* Power down the GPU before changing the idle level */
+	kgsl_pwrctrl_change_state(device, KGSL_STATE_SUSPEND);
+	gmu->idle_level = val;
+	kgsl_pwrctrl_change_state(device, KGSL_STATE_SLUMBER);
+
+	mutex_unlock(&device->mutex);
+	return 0;
+}
+
 static unsigned int _preemption_show(struct adreno_device *adreno_dev)
 {
 	return adreno_is_preemption_execution_enabled(adreno_dev);
@@ -269,6 +285,40 @@ static unsigned int _lm_show(struct adreno_device *adreno_dev)
 	return test_bit(ADRENO_LM_CTRL, &adreno_dev->pwrctrl_flag);
 }
 
+static int _ifpc_store(struct adreno_device *adreno_dev, unsigned int val)
+{
+	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+	struct gmu_device *gmu = &device->gmu;
+	unsigned int requested_idle_level;
+
+	if (!kgsl_gmu_isenabled(device) ||
+			!ADRENO_FEATURE(adreno_dev, ADRENO_IFPC))
+		return -EINVAL;
+
+	if ((val && gmu->idle_level >= GPU_HW_IFPC) ||
+			(!val && gmu->idle_level < GPU_HW_IFPC))
+		return 0;
+
+	if (val)
+		requested_idle_level = GPU_HW_IFPC;
+	else {
+		if (ADRENO_FEATURE(adreno_dev, ADRENO_SPTP_PC))
+			requested_idle_level = GPU_HW_SPTP_PC;
+		else
+			requested_idle_level = GPU_HW_ACTIVE;
+	}
+
+	return _gmu_idle_level_store(adreno_dev, requested_idle_level);
+}
+
+static unsigned int _ifpc_show(struct adreno_device *adreno_dev)
+{
+	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+	struct gmu_device *gmu = &device->gmu;
+
+	return kgsl_gmu_isenabled(device) && gmu->idle_level >= GPU_HW_IFPC;
+}
+
 static ssize_t _sysfs_store_u32(struct device *dev,
 		struct device_attribute *attr,
 		const char *buf, size_t count)
@@ -367,6 +417,7 @@ static ADRENO_SYSFS_BOOL(lm);
 static ADRENO_SYSFS_BOOL(preemption);
 static ADRENO_SYSFS_BOOL(hwcg);
 static ADRENO_SYSFS_BOOL(throttling);
+static ADRENO_SYSFS_BOOL(ifpc);
 
 
 
@@ -387,6 +438,7 @@ static const struct device_attribute *_attr_list[] = {
 	&adreno_attr_preempt_level.attr,
 	&adreno_attr_usesgmem.attr,
 	&adreno_attr_skipsaverestore.attr,
+	&adreno_attr_ifpc.attr,
 	NULL,
 };
 
diff --git a/drivers/gpu/msm/kgsl_gmu.c b/drivers/gpu/msm/kgsl_gmu.c
index 82f0c11..31d7870 100644
--- a/drivers/gpu/msm/kgsl_gmu.c
+++ b/drivers/gpu/msm/kgsl_gmu.c
@@ -505,9 +505,10 @@ static int rpmh_arc_cmds(struct gmu_device *gmu,
 	unsigned int len;
 
 	len = cmd_db_get_aux_data_len(res_id);
+	if (len == 0)
+		return -EINVAL;
 
 	if (len > (MAX_GX_LEVELS << 1)) {
-		/* CmdDB VLVL table size in bytes is too large */
 		dev_err(&gmu->pdev->dev,
 			"gfx cmddb size %d larger than alloc buf %d of %s\n",
 			len, (MAX_GX_LEVELS << 1), res_id);
@@ -515,8 +516,16 @@ static int rpmh_arc_cmds(struct gmu_device *gmu,
 	}
 
 	cmd_db_get_aux_data(res_id, (uint8_t *)arc->val, len);
-	for (arc->num = 1; arc->num <= MAX_GX_LEVELS; arc->num++) {
-		if (arc->num == MAX_GX_LEVELS ||
+
+	/*
+	 * cmd_db_get_aux_data() gives us a zero-padded table of
+	 * size len that contains the arc values. To determine the
+	 * number of arc values, we loop through the table and count
+	 * them until we get to the end of the buffer or hit the
+	 * zero padding.
+	 */
+	for (arc->num = 1; arc->num <= len; arc->num++) {
+		if (arc->num == len ||
 				arc->val[arc->num - 1] >= arc->val[arc->num])
 			break;
 	}
@@ -1291,37 +1300,6 @@ static int gmu_disable_gdsc(struct gmu_device *gmu)
 	return -ETIMEDOUT;
 }
 
-static int gmu_fast_boot(struct kgsl_device *device)
-{
-	int ret;
-	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
-	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
-	struct gmu_device *gmu = &device->gmu;
-
-	hfi_stop(gmu);
-	clear_bit(GMU_HFI_ON, &gmu->flags);
-
-	ret = gpudev->rpmh_gpu_pwrctrl(adreno_dev, GMU_FW_START,
-		GMU_RESET, 0);
-	if (ret)
-		return ret;
-
-	/*FIXME: enabling WD interrupt*/
-
-	ret = hfi_start(gmu, GMU_WARM_BOOT);
-	if (ret)
-		return ret;
-
-	ret = gpudev->oob_set(adreno_dev, OOB_CPINIT_SET_MASK,
-			OOB_CPINIT_CHECK_MASK, OOB_CPINIT_CLEAR_MASK);
-
-	if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG))
-		gpudev->oob_clear(adreno_dev,
-				OOB_BOOT_SLUMBER_CLEAR_MASK);
-
-	return ret;
-}
-
 static int gmu_suspend(struct kgsl_device *device)
 {
 	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
@@ -1455,68 +1433,51 @@ int gmu_start(struct kgsl_device *device)
 			/* Send DCVS level prior to reset*/
 			kgsl_pwrctrl_pwrlevel_change(device,
 				pwr->default_pwrlevel);
+		} else {
+			/* GMU fast boot */
+			hfi_stop(gmu);
 
-			ret = gpudev->oob_set(adreno_dev,
-				OOB_CPINIT_SET_MASK,
-				OOB_CPINIT_CHECK_MASK,
-				OOB_CPINIT_CLEAR_MASK);
+			ret = gpudev->rpmh_gpu_pwrctrl(adreno_dev, GMU_FW_START,
+					GMU_RESET, 0);
+			if (ret)
+				goto error_gmu;
 
-		} else
-			gmu_fast_boot(device);
+			ret = hfi_start(gmu, GMU_WARM_BOOT);
+			if (ret)
+				goto error_gmu;
+		}
 		break;
 	default:
 		break;
 	}
 
-	if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG))
-		gpudev->oob_clear(adreno_dev,
-				OOB_BOOT_SLUMBER_CLEAR_MASK);
-
 	return ret;
 
 error_gmu:
+	if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG))
+		gpudev->oob_clear(adreno_dev,
+				OOB_BOOT_SLUMBER_CLEAR_MASK);
 	gmu_snapshot(device);
 	return ret;
 }
 
-#define GMU_IDLE_TIMEOUT	10 /* ms */
-
 /* Caller shall ensure GPU is ready for SLUMBER */
 void gmu_stop(struct kgsl_device *device)
 {
 	struct gmu_device *gmu = &device->gmu;
 	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
 	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
-	unsigned long t;
-	bool idle = false;
-	unsigned int reg;
+	bool idle = true;
 
 	if (!test_bit(GMU_CLK_ON, &gmu->flags))
 		return;
 
-	t = jiffies + msecs_to_jiffies(GMU_IDLE_TIMEOUT);
-	while (!time_after(jiffies, t)) {
-		adreno_read_gmureg(ADRENO_DEVICE(device),
-			ADRENO_REG_GMU_RPMH_POWER_STATE, &reg);
-		if (reg == device->gmu.idle_level) {
-			idle = true;
-			break;
-		}
-		/* Wait 100us to reduce unnecessary AHB bus traffic */
-		udelay(100);
-		cond_resched();
-	}
-
-	/* Double check one last time */
-	if (idle == false) {
-		adreno_read_gmureg(ADRENO_DEVICE(device),
-			ADRENO_REG_GMU_RPMH_POWER_STATE, &reg);
-		if (reg == device->gmu.idle_level)
-			idle = true;
-	}
+	/* Wait for the lowest idle level we requested */
+	if (gpudev->wait_for_lowest_idle &&
+			gpudev->wait_for_lowest_idle(adreno_dev))
+		idle = false;
 
 	gpudev->rpmh_gpu_pwrctrl(adreno_dev, GMU_NOTIFY_SLUMBER, 0, 0);
-
 	if (!idle || (gpudev->wait_for_gmu_idle &&
 			gpudev->wait_for_gmu_idle(adreno_dev))) {
 		dev_err(&gmu->pdev->dev, "Stopping GMU before it is idle\n");
diff --git a/drivers/gpu/msm/kgsl_gmu.h b/drivers/gpu/msm/kgsl_gmu.h
index a5dc692..adabbc2 100644
--- a/drivers/gpu/msm/kgsl_gmu.h
+++ b/drivers/gpu/msm/kgsl_gmu.h
@@ -56,6 +56,9 @@
 #define GPUBUSYIGNAHB		BIT(23)
 #define CXGXCPUBUSYIGNAHB	BIT(30)
 
+/* GMU timeouts */
+#define GMU_IDLE_TIMEOUT        10 /* ms */
+
 /* Constants for GMU OOBs */
 #define OOB_BOOT_OPTION         0
 #define OOB_SLUMBER_OPTION      1
@@ -67,15 +70,12 @@
 #define OOB_DCVS_SET_MASK		BIT(23)
 #define OOB_DCVS_CHECK_MASK		BIT(31)
 #define OOB_DCVS_CLEAR_MASK		BIT(31)
-#define OOB_CPINIT_SET_MASK		BIT(16)
-#define OOB_CPINIT_CHECK_MASK		BIT(24)
-#define OOB_CPINIT_CLEAR_MASK		BIT(24)
+#define OOB_GPU_SET_MASK		BIT(16)
+#define OOB_GPU_CHECK_MASK		BIT(24)
+#define OOB_GPU_CLEAR_MASK		BIT(24)
 #define OOB_PERFCNTR_SET_MASK		BIT(17)
 #define OOB_PERFCNTR_CHECK_MASK		BIT(25)
 #define OOB_PERFCNTR_CLEAR_MASK		BIT(25)
-#define OOB_GPUSTART_SET_MASK		BIT(18)
-#define OOB_GPUSTART_CHECK_MASK		BIT(26)
-#define OOB_GPUSTART_CLEAR_MASK		BIT(26)
 
 /* Bits for the flags field in the gmu structure */
 enum gmu_flags {
diff --git a/drivers/gpu/msm/kgsl_hfi.c b/drivers/gpu/msm/kgsl_hfi.c
index 68e0f3a..2cc60b5 100644
--- a/drivers/gpu/msm/kgsl_hfi.c
+++ b/drivers/gpu/msm/kgsl_hfi.c
@@ -278,8 +278,7 @@ int hfi_send_gmu_init(struct gmu_device *gmu, uint32_t boot_state)
 	int rc = 0;
 	struct pending_msg msg;
 
-	rc = hfi_send_msg(gmu, (struct hfi_msg_hdr *)&init_msg,
-			msg_size_dwords, &msg);
+	rc = hfi_send_msg(gmu, &init_msg.hdr, msg_size_dwords, &msg);
 	if (rc)
 		return rc;
 
@@ -309,8 +308,7 @@ int hfi_get_fw_version(struct gmu_device *gmu,
 	int rc = 0;
 	struct pending_msg msg;
 
-	rc = hfi_send_msg(gmu, (struct hfi_msg_hdr *)&fw_ver,
-			msg_size_dwords, &msg);
+	rc = hfi_send_msg(gmu, &fw_ver.hdr, msg_size_dwords, &msg);
 	if (rc)
 		return rc;
 
@@ -346,8 +344,7 @@ int hfi_send_lmconfig(struct gmu_device *gmu)
 		lmconfig.lm_enable_bitmask =
 			(1 << (gmu->lm_dcvs_level + 1)) - 1;
 
-	rc = hfi_send_msg(gmu, (struct hfi_msg_hdr *) &lmconfig,
-			msg_size_dwords, &msg);
+	rc = hfi_send_msg(gmu, &lmconfig.hdr, msg_size_dwords, &msg);
 	if (rc)
 		return rc;
 
@@ -388,8 +385,7 @@ int hfi_send_perftbl(struct gmu_device *gmu)
 
 	}
 
-	rc = hfi_send_msg(gmu, (struct hfi_msg_hdr *)&dcvstbl,
-			msg_size, &msg);
+	rc = hfi_send_msg(gmu, &dcvstbl.hdr, msg_size, &msg);
 	if (rc)
 		return rc;
 
@@ -441,8 +437,7 @@ int hfi_send_bwtbl(struct gmu_device *gmu)
 					gmu->rpmh_votes.cnoc_votes.
 					cmd_data[i][j];
 
-	rc = hfi_send_msg(gmu, (struct hfi_msg_hdr *) &bwtbl,
-			msg_size_dwords, &msg);
+	rc = hfi_send_msg(gmu, &bwtbl.hdr, msg_size_dwords, &msg);
 	if (rc)
 		return rc;
 
@@ -454,6 +449,22 @@ int hfi_send_bwtbl(struct gmu_device *gmu)
 	return rc;
 }
 
+static int hfi_send_test(struct gmu_device *gmu)
+{
+	struct hfi_test_cmd test_msg = {
+		.hdr = {
+			.id = H2F_MSG_TEST,
+			.size = sizeof(test_msg) >> 2,
+			.type = HFI_MSG_CMD,
+		},
+	};
+	uint32_t msg_size_dwords = (sizeof(test_msg)) >> 2;
+	struct pending_msg msg;
+
+	return hfi_send_msg(gmu, (struct hfi_msg_hdr *)&test_msg.hdr,
+			msg_size_dwords, &msg);
+}
+
 int hfi_send_dcvs_vote(struct gmu_device *gmu, uint32_t perf_idx,
 		uint32_t bw_idx, enum rpm_ack_type ack_type)
 {
@@ -478,8 +489,7 @@ int hfi_send_dcvs_vote(struct gmu_device *gmu, uint32_t perf_idx,
 	int rc = 0;
 	struct pending_msg msg;
 
-	rc = hfi_send_msg(gmu, (struct hfi_msg_hdr *)&dcvs_cmd,
-			msg_size_dwords, &msg);
+	rc = hfi_send_msg(gmu, &dcvs_cmd.hdr, msg_size_dwords, &msg);
 	if (rc)
 		return rc;
 
@@ -511,8 +521,7 @@ int hfi_notify_slumber(struct gmu_device *gmu,
 	if (init_perf_idx >= MAX_GX_LEVELS || init_bw_idx >= MAX_GX_LEVELS)
 		return -EINVAL;
 
-	rc = hfi_send_msg(gmu, (struct hfi_msg_hdr *) &slumber_cmd,
-			msg_size_dwords, &msg);
+	rc = hfi_send_msg(gmu, &slumber_cmd.hdr, msg_size_dwords, &msg);
 	if (rc)
 		return rc;
 
@@ -614,12 +623,19 @@ int hfi_start(struct gmu_device *gmu, uint32_t boot_state)
 
 		result = hfi_send_lmconfig(gmu);
 		if (result) {
-			dev_err(dev, "Failire enabling limits management (%d)\n",
-				result);
+			dev_err(dev, "Failure enabling LM (%d)\n",
+					result);
 			return result;
 		}
 	}
 
+	/* Tell the GMU we are sending no more HFIs until the next boot */
+	if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG)) {
+		result = hfi_send_test(gmu);
+		if (result)
+			return result;
+	}
+
 	set_bit(GMU_HFI_ON, &gmu->flags);
 	return 0;
 }
diff --git a/drivers/gpu/msm/kgsl_hfi.h b/drivers/gpu/msm/kgsl_hfi.h
index 47d07d9..105599c2 100644
--- a/drivers/gpu/msm/kgsl_hfi.h
+++ b/drivers/gpu/msm/kgsl_hfi.h
@@ -115,7 +115,7 @@ enum hfi_f2h_qpri {
 	HFI_F2H_QPRI_DEBUG = 40,
 };
 
-#define HFI_RSP_TIMEOUT 500 /* msec */
+#define HFI_RSP_TIMEOUT 5000 /* msec */
 #define HFI_H2F_CMD_IRQ_MASK BIT(0)
 
 enum hfi_msg_type {
@@ -228,6 +228,10 @@ struct hfi_bwtable_cmd {
 	uint32_t ddr_cmd_data[MAX_GX_LEVELS][MAX_BW_CMDS];
 };
 
+struct hfi_test_cmd {
+	struct hfi_msg_hdr hdr;
+};
+
 struct arc_vote_desc {
 	/* In case of GPU freq vote, primary is GX, secondary is MX
 	 * in case of GMU freq vote, primary is CX, secondary is MX
diff --git a/drivers/gpu/msm/kgsl_mmu.c b/drivers/gpu/msm/kgsl_mmu.c
index 8ea4492..a0fd3ec 100644
--- a/drivers/gpu/msm/kgsl_mmu.c
+++ b/drivers/gpu/msm/kgsl_mmu.c
@@ -26,6 +26,17 @@
 
 static void pagetable_remove_sysfs_objects(struct kgsl_pagetable *pagetable);
 
+static void _deferred_destroy(struct work_struct *ws)
+{
+	struct kgsl_pagetable *pagetable = container_of(ws,
+					struct kgsl_pagetable, destroy_ws);
+
+	if (PT_OP_VALID(pagetable, mmu_destroy_pagetable))
+		pagetable->pt_ops->mmu_destroy_pagetable(pagetable);
+
+	kfree(pagetable);
+}
+
 static void kgsl_destroy_pagetable(struct kref *kref)
 {
 	struct kgsl_pagetable *pagetable = container_of(kref,
@@ -33,10 +44,7 @@ static void kgsl_destroy_pagetable(struct kref *kref)
 
 	kgsl_mmu_detach_pagetable(pagetable);
 
-	if (PT_OP_VALID(pagetable, mmu_destroy_pagetable))
-		pagetable->pt_ops->mmu_destroy_pagetable(pagetable);
-
-	kfree(pagetable);
+	kgsl_schedule_work(&pagetable->destroy_ws);
 }
 
 static inline void kgsl_put_pagetable(struct kgsl_pagetable *pagetable)
@@ -299,6 +307,7 @@ kgsl_mmu_createpagetableobject(struct kgsl_mmu *mmu, unsigned int name)
 	kref_init(&pagetable->refcount);
 
 	spin_lock_init(&pagetable->lock);
+	INIT_WORK(&pagetable->destroy_ws, _deferred_destroy);
 
 	pagetable->mmu = mmu;
 	pagetable->name = name;
diff --git a/drivers/gpu/msm/kgsl_mmu.h b/drivers/gpu/msm/kgsl_mmu.h
index 56bb317..7a8ab74 100644
--- a/drivers/gpu/msm/kgsl_mmu.h
+++ b/drivers/gpu/msm/kgsl_mmu.h
@@ -42,6 +42,7 @@ struct kgsl_pagetable {
 	struct list_head list;
 	unsigned int name;
 	struct kobject *kobj;
+	struct work_struct destroy_ws;
 
 	struct {
 		atomic_t entries;
diff --git a/drivers/hwmon/qpnp-adc-current.c b/drivers/hwmon/qpnp-adc-current.c
index a597ef9..c4da780 100644
--- a/drivers/hwmon/qpnp-adc-current.c
+++ b/drivers/hwmon/qpnp-adc-current.c
@@ -27,17 +27,11 @@
 #include <linux/spmi.h>
 #include <linux/platform_device.h>
 #include <linux/of_irq.h>
-#ifdef CONFIG_WAKELOCK
-#include <linux/wakelock.h>
-#endif
 #include <linux/interrupt.h>
 #include <linux/completion.h>
 #include <linux/hwmon-sysfs.h>
 #include <linux/qpnp/qpnp-adc.h>
 #include <linux/platform_device.h>
-#ifdef CONFIG_WAKELOCK
-#include <linux/wakelock.h>
-#endif
 
 /* QPNP IADC register definition */
 #define QPNP_IADC_REVISION1				0x0
diff --git a/drivers/hwtracing/coresight/coresight-byte-cntr.c b/drivers/hwtracing/coresight/coresight-byte-cntr.c
index 496738c..7ef2710 100644
--- a/drivers/hwtracing/coresight/coresight-byte-cntr.c
+++ b/drivers/hwtracing/coresight/coresight-byte-cntr.c
@@ -341,7 +341,7 @@ struct byte_cntr *byte_cntr_init(struct amba_device *adev,
 	if (byte_cntr_irq < 0)
 		return NULL;
 
-	byte_cntr_data = devm_kmalloc(dev, sizeof(*byte_cntr_data), GFP_KERNEL);
+	byte_cntr_data = devm_kzalloc(dev, sizeof(*byte_cntr_data), GFP_KERNEL);
 	if (!byte_cntr_data)
 		return NULL;
 
@@ -363,7 +363,6 @@ struct byte_cntr *byte_cntr_init(struct amba_device *adev,
 	}
 
 	tmcdrvdata = drvdata;
-	byte_cntr_data->block_size = 0;
 	byte_cntr_data->byte_cntr_irq = byte_cntr_irq;
 	atomic_set(&byte_cntr_data->irq_cnt, 0);
 	init_waitqueue_head(&byte_cntr_data->wq);
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c
index 71e4103..ee4b8b4 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x.c
@@ -976,7 +976,12 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
 
 	spin_lock_init(&drvdata->spinlock);
 
-	drvdata->cpu = pdata ? pdata->cpu : 0;
+	drvdata->cpu = pdata ? pdata->cpu : -1;
+
+	if (drvdata->cpu == -1) {
+		dev_info(dev, "CPU not available\n");
+		return -ENODEV;
+	}
 
 	get_online_cpus();
 
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c
index 9e024ce..eb70e7a 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etr.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c
@@ -994,7 +994,7 @@ int tmc_read_prepare_etr(struct tmc_drvdata *drvdata)
 		goto out;
 	}
 
-	if (!drvdata->byte_cntr || drvdata->byte_cntr->enable) {
+	if (drvdata->byte_cntr && drvdata->byte_cntr->enable) {
 		ret = -EINVAL;
 		goto out;
 	}
diff --git a/drivers/hwtracing/coresight/of_coresight.c b/drivers/hwtracing/coresight/of_coresight.c
index 81bbd78..5473fcf 100644
--- a/drivers/hwtracing/coresight/of_coresight.c
+++ b/drivers/hwtracing/coresight/of_coresight.c
@@ -182,8 +182,8 @@ struct coresight_platform_data *of_get_coresight_platform_data(
 		} while (ep);
 	}
 
-	/* Affinity defaults to CPU0 */
-	pdata->cpu = 0;
+	/* Affinity defaults to invalid */
+	pdata->cpu = -1;
 	dn = of_parse_phandle(node, "cpu", 0);
 	for (cpu = 0; dn && cpu < nr_cpu_ids; cpu++) {
 		if (dn == of_get_cpu_node(cpu, NULL)) {
diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
index 7e9999b..2460ba7 100644
--- a/drivers/i2c/busses/i2c-qcom-geni.c
+++ b/drivers/i2c/busses/i2c-qcom-geni.c
@@ -910,8 +910,23 @@ static int geni_i2c_runtime_resume(struct device *dev)
 
 static int geni_i2c_suspend_noirq(struct device *device)
 {
-	if (!pm_runtime_status_suspended(device))
+	struct geni_i2c_dev *gi2c = dev_get_drvdata(device);
+	int ret;
+
+	/* Make sure no transactions are pending */
+	ret = i2c_trylock_bus(&gi2c->adap, I2C_LOCK_SEGMENT);
+	if (!ret) {
+		GENI_SE_ERR(gi2c->ipcl, true, gi2c->dev,
+				"late I2C transaction request\n");
 		return -EBUSY;
+	}
+	if (!pm_runtime_status_suspended(device)) {
+		geni_i2c_runtime_suspend(device);
+		pm_runtime_disable(device);
+		pm_runtime_set_suspended(device);
+		pm_runtime_enable(device);
+	}
+	i2c_unlock_bus(&gi2c->adap, I2C_LOCK_SEGMENT);
 	return 0;
 }
 #else
diff --git a/drivers/iio/adc/qcom-rradc.c b/drivers/iio/adc/qcom-rradc.c
index 357bfb2..02dfbf8 100644
--- a/drivers/iio/adc/qcom-rradc.c
+++ b/drivers/iio/adc/qcom-rradc.c
@@ -22,6 +22,7 @@
 #include <linux/regmap.h>
 #include <linux/delay.h>
 #include <linux/qpnp/qpnp-revid.h>
+#include <linux/power_supply.h>
 
 #define FG_ADC_RR_EN_CTL			0x46
 #define FG_ADC_RR_SKIN_TEMP_LSB			0x50
@@ -192,8 +193,7 @@
 #define FG_RR_ADC_STS_CHANNEL_READING_MASK	0x3
 #define FG_RR_ADC_STS_CHANNEL_STS		0x2
 
-#define FG_RR_CONV_CONTINUOUS_TIME_MIN_US	50000
-#define FG_RR_CONV_CONTINUOUS_TIME_MAX_US	51000
+#define FG_RR_CONV_CONTINUOUS_TIME_MIN_MS	50
 #define FG_RR_CONV_MAX_RETRY_CNT		50
 #define FG_RR_TP_REV_VERSION1		21
 #define FG_RR_TP_REV_VERSION2		29
@@ -235,6 +235,7 @@ struct rradc_chip {
 	struct device_node		*revid_dev_node;
 	struct pmic_revid_data		*pmic_fab_id;
 	int volt;
+	struct power_supply		*usb_trig;
 };
 
 struct rradc_channels {
@@ -726,6 +727,24 @@ static int rradc_disable_continuous_mode(struct rradc_chip *chip)
 	return rc;
 }
 
+static bool rradc_is_usb_present(struct rradc_chip *chip)
+{
+	union power_supply_propval pval;
+	int rc;
+	bool usb_present = false;
+
+	if (!chip->usb_trig) {
+		pr_debug("USB property not present\n");
+		return usb_present;
+	}
+
+	rc = power_supply_get_property(chip->usb_trig,
+			POWER_SUPPLY_PROP_PRESENT, &pval);
+	usb_present = (rc < 0) ? 0 : pval.intval;
+
+	return usb_present;
+}
+
 static int rradc_check_status_ready_with_retry(struct rradc_chip *chip,
 		struct rradc_chan_prop *prop, u8 *buf, u16 status)
 {
@@ -745,8 +764,18 @@ static int rradc_check_status_ready_with_retry(struct rradc_chip *chip,
 			(retry_cnt < FG_RR_CONV_MAX_RETRY_CNT)) {
 		pr_debug("%s is not ready; nothing to read:0x%x\n",
 			rradc_chans[prop->channel].datasheet_name, buf[0]);
-		usleep_range(FG_RR_CONV_CONTINUOUS_TIME_MIN_US,
-				FG_RR_CONV_CONTINUOUS_TIME_MAX_US);
+
+		if (((prop->channel == RR_ADC_CHG_TEMP) ||
+			(prop->channel == RR_ADC_SKIN_TEMP) ||
+			(prop->channel == RR_ADC_USBIN_I) ||
+			(prop->channel == RR_ADC_DIE_TEMP)) &&
+					((!rradc_is_usb_present(chip)))) {
+			pr_debug("USB not present for %d\n", prop->channel);
+			rc = -ENODATA;
+			break;
+		}
+
+		msleep(FG_RR_CONV_CONTINUOUS_TIME_MIN_MS);
 		retry_cnt++;
 		rc = rradc_read(chip, status, buf, 1);
 		if (rc < 0) {
@@ -764,7 +793,7 @@ static int rradc_check_status_ready_with_retry(struct rradc_chip *chip,
 static int rradc_read_channel_with_continuous_mode(struct rradc_chip *chip,
 			struct rradc_chan_prop *prop, u8 *buf)
 {
-	int rc = 0;
+	int rc = 0, ret = 0;
 	u16 status = 0;
 
 	rc = rradc_enable_continuous_mode(chip);
@@ -777,23 +806,25 @@ static int rradc_read_channel_with_continuous_mode(struct rradc_chip *chip,
 	rc = rradc_read(chip, status, buf, 1);
 	if (rc < 0) {
 		pr_err("status read failed:%d\n", rc);
-		return rc;
+		ret = rc;
+		goto disable;
 	}
 
 	rc = rradc_check_status_ready_with_retry(chip, prop,
 						buf, status);
 	if (rc < 0) {
 		pr_err("Status read failed:%d\n", rc);
-		return rc;
+		ret = rc;
 	}
 
+disable:
 	rc = rradc_disable_continuous_mode(chip);
 	if (rc < 0) {
 		pr_err("Failed to switch to non continuous mode\n");
-		return rc;
+		ret = rc;
 	}
 
-	return rc;
+	return ret;
 }
 
 static int rradc_enable_batt_id_channel(struct rradc_chip *chip, bool enable)
@@ -1152,6 +1183,10 @@ static int rradc_probe(struct platform_device *pdev)
 	indio_dev->channels = chip->iio_chans;
 	indio_dev->num_channels = chip->nchannels;
 
+	chip->usb_trig = power_supply_get_by_name("usb");
+	if (!chip->usb_trig)
+		pr_debug("Error obtaining usb power supply\n");
+
 	return devm_iio_device_register(dev, indio_dev);
 }
 
diff --git a/drivers/mailbox/msm_qmp.c b/drivers/mailbox/msm_qmp.c
index 3b07c47..d6e41ae 100644
--- a/drivers/mailbox/msm_qmp.c
+++ b/drivers/mailbox/msm_qmp.c
@@ -359,7 +359,7 @@ static int qmp_send_data(struct mbox_chan *chan, void *data)
 	addr = mdev->msgram + mbox->mcore_mbox_offset;
 	if (mbox->tx_sent) {
 		spin_unlock_irqrestore(&mbox->tx_lock, flags);
-		return -EBUSY;
+		return -EAGAIN;
 	}
 
 	if (pkt->size + sizeof(pkt->size) > mbox->mcore_mbox_size) {
diff --git a/drivers/media/platform/msm/broadcast/tspp.c b/drivers/media/platform/msm/broadcast/tspp.c
index 2c90e47..be7f4a1 100644
--- a/drivers/media/platform/msm/broadcast/tspp.c
+++ b/drivers/media/platform/msm/broadcast/tspp.c
@@ -36,7 +36,6 @@
 #include <linux/regulator/consumer.h>
 #include <dt-bindings/regulator/qcom,rpmh-regulator.h>
 #include <linux/msm-sps.h>            /* BAM stuff */
-#include <linux/wakelock.h>      /* Locking functions */
 #include <linux/timer.h>         /* Timer services */
 #include <linux/jiffies.h>       /* Jiffies counter */
 #include <linux/qcom_tspp.h>
diff --git a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_core_common.c b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_core_common.c
index 6c8bde1..3fbb3f0 100644
--- a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_core_common.c
+++ b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_core_common.c
@@ -338,6 +338,7 @@ int cam_cdm_stream_ops_internal(void *hw_priv,
 				}
 			} else {
 				client->stream_on = false;
+				rc = 0;
 				CAM_DBG(CAM_CDM,
 					"Client stream off success =%d",
 					cdm_hw->open_count);
diff --git a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_virtual_core.c b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_virtual_core.c
index b230d4e..d76f344 100644
--- a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_virtual_core.c
+++ b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_virtual_core.c
@@ -131,8 +131,10 @@ int cam_virtual_cdm_submit_bl(struct cam_hw_info *cdm_hw,
 				cdm_cmd->cmd[i].len, client->data.base_array,
 				client->data.base_array_cnt, core->bl_tag);
 			if (rc) {
-				CAM_ERR(CAM_CDM, "write failed for cnt=%d:%d",
-					i, req->data->cmd_arrary_count);
+				CAM_ERR(CAM_CDM,
+					"write failed for cnt=%d:%d len %u",
+					i, req->data->cmd_arrary_count,
+					cdm_cmd->cmd[i].len);
 				break;
 			}
 		} else {
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_node.c b/drivers/media/platform/msm/camera/cam_core/cam_node.c
index 3a78b5e..6e48c6a 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_node.c
+++ b/drivers/media/platform/msm/camera/cam_core/cam_node.c
@@ -65,7 +65,8 @@ static int __cam_node_handle_acquire_dev(struct cam_node *node,
 
 	rc = cam_context_handle_acquire_dev(ctx, acquire);
 	if (rc) {
-		CAM_ERR(CAM_CORE, "Acquire device failed");
+		CAM_ERR(CAM_CORE, "Acquire device failed for node %s",
+			node->name);
 		goto free_ctx;
 	}
 
@@ -82,6 +83,7 @@ static int __cam_node_handle_start_dev(struct cam_node *node,
 	struct cam_start_stop_dev_cmd *start)
 {
 	struct cam_context *ctx = NULL;
+	int rc;
 
 	if (!start)
 		return -EINVAL;
@@ -103,13 +105,18 @@ static int __cam_node_handle_start_dev(struct cam_node *node,
 		return -EINVAL;
 	}
 
-	return cam_context_handle_start_dev(ctx, start);
+	rc = cam_context_handle_start_dev(ctx, start);
+	if (rc)
+		CAM_ERR(CAM_CORE, "Start failure for node %s", node->name);
+
+	return rc;
 }
 
 static int __cam_node_handle_stop_dev(struct cam_node *node,
 	struct cam_start_stop_dev_cmd *stop)
 {
 	struct cam_context *ctx = NULL;
+	int rc;
 
 	if (!stop)
 		return -EINVAL;
@@ -131,13 +138,18 @@ static int __cam_node_handle_stop_dev(struct cam_node *node,
 		return -EINVAL;
 	}
 
-	return cam_context_handle_stop_dev(ctx, stop);
+	rc = cam_context_handle_stop_dev(ctx, stop);
+	if (rc)
+		CAM_ERR(CAM_CORE, "Stop failure for node %s", node->name);
+
+	return rc;
 }
 
 static int __cam_node_handle_config_dev(struct cam_node *node,
 	struct cam_config_dev_cmd *config)
 {
 	struct cam_context *ctx = NULL;
+	int rc;
 
 	if (!config)
 		return -EINVAL;
@@ -159,7 +171,11 @@ static int __cam_node_handle_config_dev(struct cam_node *node,
 		return -EINVAL;
 	}
 
-	return cam_context_handle_config_dev(ctx, config);
+	rc = cam_context_handle_config_dev(ctx, config);
+	if (rc)
+		CAM_ERR(CAM_CORE, "Config failure for node %s", node->name);
+
+	return rc;
 }
 
 static int __cam_node_handle_release_dev(struct cam_node *node,
@@ -183,18 +199,19 @@ static int __cam_node_handle_release_dev(struct cam_node *node,
 
 	ctx = (struct cam_context *)cam_get_device_priv(release->dev_handle);
 	if (!ctx) {
-		CAM_ERR(CAM_CORE, "Can not get context for handle %d",
-			release->dev_handle);
+		CAM_ERR(CAM_CORE, "Can not get context for handle %d node %s",
+			release->dev_handle, node->name);
 		return -EINVAL;
 	}
 
 	rc = cam_context_handle_release_dev(ctx, release);
 	if (rc)
-		CAM_ERR(CAM_CORE, "context release failed");
+		CAM_ERR(CAM_CORE, "context release failed node %s", node->name);
 
 	rc = cam_destroy_device_hdl(release->dev_handle);
 	if (rc)
-		CAM_ERR(CAM_CORE, "destroy device handle is failed");
+		CAM_ERR(CAM_CORE, "destroy device handle is failed node %s",
+			node->name);
 
 	mutex_lock(&node->list_mutex);
 	list_add_tail(&ctx->list, &node->free_ctx_list);
diff --git a/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_core.c b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_core.c
index 11a81d6..b8a5685 100644
--- a/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_core.c
+++ b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_core.c
@@ -661,7 +661,6 @@ int cam_fd_hw_init(void *hw_priv, void *init_hw_args, uint32_t arg_size)
 
 	if (fd_hw->open_count > 0) {
 		rc = 0;
-		mutex_unlock(&fd_hw->hw_mutex);
 		goto cdm_streamon;
 	}
 
@@ -681,12 +680,13 @@ int cam_fd_hw_init(void *hw_priv, void *init_hw_args, uint32_t arg_size)
 
 	fd_hw->hw_state = CAM_HW_STATE_POWER_UP;
 	fd_core->core_state = CAM_FD_CORE_STATE_IDLE;
+
+cdm_streamon:
 	fd_hw->open_count++;
 	CAM_DBG(CAM_FD, "FD HW Init ref count after %d", fd_hw->open_count);
 
 	mutex_unlock(&fd_hw->hw_mutex);
 
-cdm_streamon:
 	if (init_args->ctx_hw_private) {
 		struct cam_fd_ctx_hw_private *ctx_hw_private =
 			init_args->ctx_hw_private;
@@ -712,7 +712,7 @@ int cam_fd_hw_init(void *hw_priv, void *init_hw_args, uint32_t arg_size)
 int cam_fd_hw_deinit(void *hw_priv, void *deinit_hw_args, uint32_t arg_size)
 {
 	struct cam_hw_info *fd_hw = hw_priv;
-	struct cam_fd_core *fd_core;
+	struct cam_fd_core *fd_core = NULL;
 	struct cam_fd_hw_deinit_args *deinit_args =
 		(struct cam_fd_hw_deinit_args *)deinit_hw_args;
 	int rc = 0;
@@ -728,23 +728,7 @@ int cam_fd_hw_deinit(void *hw_priv, void *deinit_hw_args, uint32_t arg_size)
 		return -EINVAL;
 	}
 
-	fd_core = (struct cam_fd_core *)fd_hw->core_info;
-
-	if (deinit_args->ctx_hw_private) {
-		struct cam_fd_ctx_hw_private *ctx_hw_private =
-			deinit_args->ctx_hw_private;
-
-		rc = cam_cdm_stream_off(ctx_hw_private->cdm_handle);
-		if (rc) {
-			CAM_ERR(CAM_FD,
-				"Failed in CDM StreamOff, handle=0x%x, rc=%d",
-				ctx_hw_private->cdm_handle, rc);
-			return rc;
-		}
-	}
-
 	mutex_lock(&fd_hw->hw_mutex);
-
 	if (fd_hw->open_count == 0) {
 		mutex_unlock(&fd_hw->hw_mutex);
 		CAM_ERR(CAM_FD, "Error Unbalanced deinit");
@@ -754,9 +738,9 @@ int cam_fd_hw_deinit(void *hw_priv, void *deinit_hw_args, uint32_t arg_size)
 	fd_hw->open_count--;
 	CAM_DBG(CAM_FD, "FD HW ref count=%d", fd_hw->open_count);
 
-	if (fd_hw->open_count) {
+	if (fd_hw->open_count > 0) {
 		rc = 0;
-		goto unlock_return;
+		goto positive_ref_cnt;
 	}
 
 	rc = cam_fd_soc_disable_resources(&fd_hw->soc_info);
@@ -764,9 +748,26 @@ int cam_fd_hw_deinit(void *hw_priv, void *deinit_hw_args, uint32_t arg_size)
 		CAM_ERR(CAM_FD, "Failed in Disable SOC, rc=%d", rc);
 
 	fd_hw->hw_state = CAM_HW_STATE_POWER_DOWN;
+	fd_core = (struct cam_fd_core *)fd_hw->core_info;
+
+	/* With the ref_cnt correct, this should never happen */
+	WARN_ON(!fd_core);
+
 	fd_core->core_state = CAM_FD_CORE_STATE_POWERDOWN;
 
-unlock_return:
+positive_ref_cnt:
+	if (deinit_args->ctx_hw_private) {
+		struct cam_fd_ctx_hw_private *ctx_hw_private =
+			deinit_args->ctx_hw_private;
+
+		rc = cam_cdm_stream_off(ctx_hw_private->cdm_handle);
+		if (rc) {
+			CAM_ERR(CAM_FD,
+				"Failed in CDM StreamOff, handle=0x%x, rc=%d",
+				ctx_hw_private->cdm_handle, rc);
+		}
+	}
+
 	mutex_unlock(&fd_hw->hw_mutex);
 	return rc;
 }
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
index 3354e2c..3844673 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
@@ -47,7 +47,6 @@
 #include "cam_debug_util.h"
 #include "cam_soc_util.h"
 
-#define ICP_WORKQ_NUM_TASK      30
 #define ICP_WORKQ_TASK_CMD_TYPE 1
 #define ICP_WORKQ_TASK_MSG_TYPE 2
 
@@ -295,6 +294,7 @@ static bool cam_icp_update_clk_busy(struct cam_icp_hw_mgr *hw_mgr,
 {
 	uint32_t next_clk_level;
 	uint32_t actual_clk;
+	bool rc = false;
 
 	/* 1. if current request frame cycles(fc) are more than previous
 	 *      frame fc
@@ -308,7 +308,8 @@ static bool cam_icp_update_clk_busy(struct cam_icp_hw_mgr *hw_mgr,
 	 * 2. if current fc is less than or equal to previous  frame fc
 	 *      Still Bump up the clock to next available level
 	 *      if it is available, then update clock, make overclk cnt to
-	 *      zero
+	 *      zero. If the clock is already at the highest clock rate,
+	 *      there is no need to update the clock.
 	 */
 	mutex_lock(&hw_mgr->hw_mgr_mutex);
 	ctx_data->clk_info.curr_fc = clk_info->frame_cycles;
@@ -326,14 +327,19 @@ static bool cam_icp_update_clk_busy(struct cam_icp_hw_mgr *hw_mgr,
 				ctx_data, hw_mgr_clk_info->curr_clk);
 			hw_mgr_clk_info->curr_clk = next_clk_level;
 		}
+		rc = true;
 	} else {
-		hw_mgr_clk_info->curr_clk =
+		next_clk_level =
 			cam_icp_get_next_clk_rate(hw_mgr, ctx_data,
 			hw_mgr_clk_info->curr_clk);
+		if (hw_mgr_clk_info->curr_clk < next_clk_level) {
+			hw_mgr_clk_info->curr_clk = next_clk_level;
+			rc = true;
+		}
 	}
 	mutex_unlock(&hw_mgr->hw_mgr_mutex);
 
-	return true;
+	return rc;
 }
 
 static bool cam_icp_update_clk_overclk_free(struct cam_icp_hw_mgr *hw_mgr,
@@ -581,8 +587,9 @@ static bool cam_icp_check_bw_update(struct cam_icp_hw_mgr *hw_mgr,
 	rc = cam_icp_update_bw(hw_mgr, ctx_data, hw_mgr_clk_info,
 		clk_info, busy);
 
-	CAM_DBG(CAM_ICP, "bw = %d update_bw = %d",
-		hw_mgr_clk_info->uncompressed_bw, rc);
+	CAM_DBG(CAM_ICP, "ubw = %lld, cbw = %lld, update_bw = %d",
+		hw_mgr_clk_info->uncompressed_bw,
+		hw_mgr_clk_info->compressed_bw, rc);
 
 	return rc;
 }
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h
index d1793e6..c4a483f 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h
@@ -33,7 +33,7 @@
 
 #define CAM_MAX_OUT_RES         6
 
-#define ICP_WORKQ_NUM_TASK      30
+#define ICP_WORKQ_NUM_TASK      100
 #define ICP_WORKQ_TASK_CMD_TYPE 1
 #define ICP_WORKQ_TASK_MSG_TYPE 2
 
@@ -124,6 +124,7 @@ struct hfi_frame_process_info {
  * @curr_fc: Context latest request frame cycles
  * @rt_flag: Flag to indicate real time request
  * @base_clk: Base clock to process the request
+ * @reserved: Reserved field
  * #uncompressed_bw: Current bandwidth voting
  * @compressed_bw: Current compressed bandwidth voting
  * @clk_rate: Supported clock rates for the context
@@ -132,8 +133,9 @@ struct cam_ctx_clk_info {
 	uint32_t curr_fc;
 	uint32_t rt_flag;
 	uint32_t base_clk;
-	uint32_t uncompressed_bw;
-	uint32_t compressed_bw;
+	uint32_t reserved;
+	uint64_t uncompressed_bw;
+	uint64_t compressed_bw;
 	int32_t clk_rate[CAM_MAX_VOTE];
 };
 /**
@@ -196,8 +198,8 @@ struct cam_icp_clk_info {
 	uint32_t curr_clk;
 	uint32_t threshold;
 	uint32_t over_clked;
-	uint32_t uncompressed_bw;
-	uint32_t compressed_bw;
+	uint64_t uncompressed_bw;
+	uint64_t compressed_bw;
 };
 
 /**
diff --git a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c
index 4ecb36e..aad7902 100644
--- a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c
+++ b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c
@@ -64,6 +64,51 @@ static int __cam_isp_ctx_enqueue_request_in_order(
 	return 0;
 }
 
+static const char *__cam_isp_resource_handle_id_to_type
+	(uint32_t resource_handle)
+{
+	switch (resource_handle) {
+	case CAM_ISP_IFE_OUT_RES_FULL:
+		return "CAM_ISP_IFE_OUT_RES_FULL";
+	case CAM_ISP_IFE_OUT_RES_DS4:
+		return "CAM_ISP_IFE_OUT_RES_DS4";
+	case CAM_ISP_IFE_OUT_RES_DS16:
+		return "CAM_ISP_IFE_OUT_RES_DS16";
+	case CAM_ISP_IFE_OUT_RES_RAW_DUMP:
+		return "CAM_ISP_IFE_OUT_RES_RAW_DUMP";
+	case CAM_ISP_IFE_OUT_RES_FD:
+		return "CAM_ISP_IFE_OUT_RES_FD";
+	case CAM_ISP_IFE_OUT_RES_PDAF:
+		return "CAM_ISP_IFE_OUT_RES_PDAF";
+	case CAM_ISP_IFE_OUT_RES_RDI_0:
+		return "CAM_ISP_IFE_OUT_RES_RDI_0";
+	case CAM_ISP_IFE_OUT_RES_RDI_1:
+		return "CAM_ISP_IFE_OUT_RES_RDI_1";
+	case CAM_ISP_IFE_OUT_RES_RDI_2:
+		return "CAM_ISP_IFE_OUT_RES_RDI_2";
+	case CAM_ISP_IFE_OUT_RES_RDI_3:
+		return "CAM_ISP_IFE_OUT_RES_RDI_3";
+	case CAM_ISP_IFE_OUT_RES_STATS_HDR_BE:
+		return "CAM_ISP_IFE_OUT_RES_STATS_HDR_BE";
+	case CAM_ISP_IFE_OUT_RES_STATS_HDR_BHIST:
+		return "CAM_ISP_IFE_OUT_RES_STATS_HDR_BHIST";
+	case CAM_ISP_IFE_OUT_RES_STATS_TL_BG:
+		return "CAM_ISP_IFE_OUT_RES_STATS_TL_BG";
+	case CAM_ISP_IFE_OUT_RES_STATS_BF:
+		return "CAM_ISP_IFE_OUT_RES_STATS_BF";
+	case CAM_ISP_IFE_OUT_RES_STATS_AWB_BG:
+		return "CAM_ISP_IFE_OUT_RES_STATS_AWB_BG";
+	case CAM_ISP_IFE_OUT_RES_STATS_BHIST:
+		return "CAM_ISP_IFE_OUT_RES_STATS_BHIST";
+	case CAM_ISP_IFE_OUT_RES_STATS_RS:
+		return "CAM_ISP_IFE_OUT_RES_STATS_RS";
+	case CAM_ISP_IFE_OUT_RES_STATS_CS:
+		return "CAM_ISP_IFE_OUT_RES_STATS_CS";
+	default:
+		return "CAM_ISP_Invalid_Resource_Type";
+	}
+}
+
 static uint64_t __cam_isp_ctx_get_event_ts(uint32_t evt_id, void *evt_data)
 {
 	uint64_t ts = 0;
@@ -101,6 +146,23 @@ static uint64_t __cam_isp_ctx_get_event_ts(uint32_t evt_id, void *evt_data)
 	return ts;
 }
 
+static void __cam_isp_ctx_handle_buf_done_fail_log(
+	struct cam_isp_ctx_req *req_isp)
+{
+	int i;
+
+	CAM_ERR_RATE_LIMIT(CAM_ISP,
+		"Resource Handles that fail to generate buf_done in prev frame");
+	for (i = 0; i < req_isp->num_fence_map_out; i++) {
+		if (req_isp->fence_map_out[i].sync_id != -1)
+		CAM_ERR_RATE_LIMIT(CAM_ISP,
+			"Resource_Handle: [%s] Sync_ID: [0x%x]",
+			__cam_isp_resource_handle_id_to_type(
+			req_isp->fence_map_out[i].resource_handle),
+			req_isp->fence_map_out[i].sync_id);
+	}
+}
+
 static int __cam_isp_ctx_handle_buf_done_in_activated_state(
 	struct cam_isp_context *ctx_isp,
 	struct cam_isp_hw_done_event_data *done,
@@ -143,8 +205,12 @@ static int __cam_isp_ctx_handle_buf_done_in_activated_state(
 		if (!bubble_state) {
 			CAM_DBG(CAM_ISP, "Sync with success: fd 0x%x",
 				   req_isp->fence_map_out[j].sync_id);
-			rc = cam_sync_signal(req_isp->fence_map_out[j].sync_id,
-				CAM_SYNC_STATE_SIGNALED_SUCCESS);
+			if (req_isp->fence_map_out[j].sync_id == -1)
+				__cam_isp_ctx_handle_buf_done_fail_log(req_isp);
+			else
+				rc = cam_sync_signal(req_isp->
+					fence_map_out[j].sync_id,
+					CAM_SYNC_STATE_SIGNALED_SUCCESS);
 			if (rc)
 				CAM_ERR(CAM_ISP, "Sync failed with rc = %d",
 					 rc);
@@ -731,14 +797,6 @@ static int __cam_isp_ctx_apply_req_in_activated_state(
 	 *
 	 */
 	ctx_isp = (struct cam_isp_context *) ctx->ctx_priv;
-	if (ctx_isp->active_req_cnt >=  2) {
-		CAM_ERR_RATE_LIMIT(CAM_ISP,
-			"Reject apply request due to congestion(cnt = %d)",
-			ctx_isp->active_req_cnt);
-		rc = -EFAULT;
-		goto end;
-	}
-
 	req = list_first_entry(&ctx->pending_req_list, struct cam_ctx_request,
 		list);
 
@@ -757,6 +815,14 @@ static int __cam_isp_ctx_apply_req_in_activated_state(
 	CAM_DBG(CAM_ISP, "Apply request %lld", req->request_id);
 	req_isp = (struct cam_isp_ctx_req *) req->req_priv;
 
+	if (ctx_isp->active_req_cnt >=  2) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP,
+			"Reject apply request due to congestion(cnt = %d)",
+			ctx_isp->active_req_cnt);
+		__cam_isp_ctx_handle_buf_done_fail_log(req_isp);
+		rc = -EFAULT;
+		goto end;
+	}
 	req_isp->bubble_report = apply->report_if_bubble;
 
 	cfg.ctxt_to_hw_map = ctx_isp->hw_ctx;
@@ -1507,6 +1573,7 @@ static int __cam_isp_ctx_config_dev_in_top_state(
 		add_req.link_hdl = ctx->link_hdl;
 		add_req.dev_hdl  = ctx->dev_hdl;
 		add_req.req_id   = req->request_id;
+		add_req.skip_before_applying = 0;
 		rc = ctx->ctx_crm_intf->add_req(&add_req);
 		if (rc) {
 			CAM_ERR(CAM_ISP, "Error: Adding request id=%llu",
@@ -1882,27 +1949,14 @@ static int __cam_isp_ctx_release_dev_in_activated(struct cam_context *ctx,
 	struct cam_release_dev_cmd *cmd)
 {
 	int rc = 0;
-	struct cam_isp_context *ctx_isp =
-		(struct cam_isp_context *) ctx->ctx_priv;
 
-	__cam_isp_ctx_stop_dev_in_activated_unlock(ctx);
+	rc = __cam_isp_ctx_stop_dev_in_activated_unlock(ctx);
+	if (rc)
+		CAM_ERR(CAM_ISP, "Stop device failed rc=%d", rc);
 
-	if (ctx_isp->hw_ctx) {
-		struct cam_hw_release_args   arg;
-
-		arg.ctxt_to_hw_map = ctx_isp->hw_ctx;
-		ctx->hw_mgr_intf->hw_release(ctx->hw_mgr_intf->hw_mgr_priv,
-			&arg);
-		ctx_isp->hw_ctx = NULL;
-	}
-
-	ctx->session_hdl = 0;
-	ctx->dev_hdl = 0;
-	ctx->link_hdl = 0;
-	ctx->ctx_crm_intf = NULL;
-
-	ctx->state =  CAM_CTX_AVAILABLE;
-	trace_cam_context_state("ISP", ctx);
+	rc = __cam_isp_ctx_release_dev_in_top_state(ctx, cmd);
+	if (rc)
+		CAM_ERR(CAM_ISP, "Release device failed rc=%d", rc);
 
 	return rc;
 }
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
index d84be30..6060278 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
@@ -13,6 +13,7 @@
 #include <linux/slab.h>
 #include <linux/string.h>
 #include <linux/uaccess.h>
+#include <linux/debugfs.h>
 #include <uapi/media/cam_isp.h>
 #include "cam_smmu_api.h"
 #include "cam_req_mgr_workq.h"
@@ -495,6 +496,7 @@ static int cam_ife_hw_mgr_acquire_res_ife_out_rdi(
 		vfe_acquire.vfe_out.out_port_info = out_port;
 		vfe_acquire.vfe_out.split_id = CAM_ISP_HW_SPLIT_LEFT;
 		vfe_acquire.vfe_out.unique_id = ife_ctx->ctx_index;
+		vfe_acquire.vfe_out.is_dual = 0;
 		hw_intf = ife_src_res->hw_res[0]->hw_intf;
 		rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv,
 			&vfe_acquire,
@@ -834,6 +836,7 @@ static int cam_ife_hw_mgr_acquire_res_ife_csid_ipp(
 
 	if (csid_res->is_dual_vfe) {
 		csid_acquire.sync_mode = CAM_ISP_HW_SYNC_SLAVE;
+		csid_acquire.master_idx = csid_res->hw_res[0]->hw_intf->hw_idx;
 
 		for (j = i + 1; j < CAM_IFE_CSID_HW_NUM_MAX; j++) {
 			if (!ife_hw_mgr->csid_devices[j])
@@ -1038,13 +1041,6 @@ static int cam_ife_mgr_acquire_cid_res(
 	struct cam_hw_intf                  *hw_intf;
 	struct cam_csid_hw_reserve_resource_args  csid_acquire;
 
-	/* no dual vfe for TPG */
-	if ((in_port->res_type == CAM_ISP_IFE_IN_RES_TPG) &&
-		(in_port->usage_type != 0)) {
-		CAM_ERR(CAM_ISP, "No Dual VFE on TPG input");
-		goto err;
-	}
-
 	ife_hw_mgr = ife_ctx->hw_mgr;
 
 	rc = cam_ife_hw_mgr_get_res(&ife_ctx->free_res_list, &cid_res);
@@ -1727,6 +1723,16 @@ static int cam_ife_mgr_start_hw(void *hw_mgr_priv, void *start_hw_args)
 
 	cam_tasklet_start(ctx->common.tasklet_info);
 
+	/* set current csid debug information to CSID HW */
+	for (i = 0; i < CAM_IFE_CSID_HW_NUM_MAX; i++) {
+		if (g_ife_hw_mgr.csid_devices[i])
+			rc = g_ife_hw_mgr.csid_devices[i]->hw_ops.process_cmd(
+				g_ife_hw_mgr.csid_devices[i]->hw_priv,
+				CAM_IFE_CSID_SET_CSID_DEBUG,
+				&g_ife_hw_mgr.debug_cfg.csid_debug,
+				sizeof(g_ife_hw_mgr.debug_cfg.csid_debug));
+	}
+
 	/* INIT IFE Root: do nothing */
 
 	CAM_DBG(CAM_ISP, "INIT IFE CID ... in ctx id:%d",
@@ -1973,13 +1979,14 @@ static int cam_ife_mgr_prepare_hw_update(void *hw_mgr_priv,
 		/* get command buffers */
 		if (ctx->base[i].split_id != CAM_ISP_HW_SPLIT_MAX) {
 			rc = cam_isp_add_command_buffers(prepare,
-				ctx->base[i].split_id);
-			if (rc) {
-				CAM_ERR(CAM_ISP,
-					"Failed in add cmdbuf, i=%d, split_id=%d, rc=%d",
-					i, ctx->base[i].split_id, rc);
-				goto end;
-			}
+			ctx->base[i].split_id, ctx->base[i].idx,
+			ctx->res_list_ife_out, CAM_IFE_HW_OUT_RES_MAX);
+				if (rc) {
+					CAM_ERR(CAM_ISP,
+						"Failed in add cmdbuf, i=%d, split_id=%d, rc=%d",
+						i, ctx->base[i].split_id, rc);
+					goto end;
+				}
 		}
 
 		if (blob_info.hfr_config) {
@@ -2520,7 +2527,7 @@ static int cam_ife_hw_mgr_check_irq_for_dual_vfe(
 	case CAM_ISP_HW_EVENT_SOF:
 		event_cnt = ife_hw_mgr_ctx->sof_cnt;
 		break;
-	case CAM_ISP_HW_EVENT_REG_UPDATE:
+	case CAM_ISP_HW_EVENT_EPOCH:
 		event_cnt = ife_hw_mgr_ctx->epoch_cnt;
 		break;
 	case CAM_ISP_HW_EVENT_EOF:
@@ -2540,8 +2547,10 @@ static int cam_ife_hw_mgr_check_irq_for_dual_vfe(
 		return rc;
 	}
 
-	if ((event_cnt[core_idx0] - event_cnt[core_idx1] > 1) ||
-		(event_cnt[core_idx1] - event_cnt[core_idx0] > 1)) {
+	if ((event_cnt[core_idx0] &&
+		(event_cnt[core_idx0] - event_cnt[core_idx1] > 1)) ||
+		(event_cnt[core_idx1] &&
+		(event_cnt[core_idx1] - event_cnt[core_idx0] > 1))) {
 
 		CAM_WARN(CAM_ISP,
 			"One of the VFE cound not generate hw event %d",
@@ -2628,6 +2637,8 @@ static int cam_ife_hw_mgr_handle_epoch_for_camif_hw_res(
 
 				if (!epoch_status)
 					ife_hwr_mgr_ctx->epoch_cnt[core_idx]++;
+				else
+					break;
 			}
 
 			/* SOF check for Right side VFE */
@@ -2637,6 +2648,8 @@ static int cam_ife_hw_mgr_handle_epoch_for_camif_hw_res(
 
 				if (!epoch_status)
 					ife_hwr_mgr_ctx->epoch_cnt[core_idx]++;
+				else
+					break;
 			}
 
 			core_index0 = hw_res_l->hw_intf->hw_idx;
@@ -2739,6 +2752,8 @@ static int cam_ife_hw_mgr_process_camif_sof(
 				hw_res_l, evt_payload);
 			if (!sof_status)
 				ife_hwr_mgr_ctx->sof_cnt[core_idx]++;
+			else
+				break;
 		}
 
 		/* SOF check for Right side VFE */
@@ -2754,6 +2769,8 @@ static int cam_ife_hw_mgr_process_camif_sof(
 				evt_payload);
 			if (!sof_status)
 				ife_hwr_mgr_ctx->sof_cnt[core_idx]++;
+			else
+				break;
 		}
 
 		core_index0 = hw_res_l->hw_intf->hw_idx;
@@ -2932,6 +2949,8 @@ static int cam_ife_hw_mgr_handle_eof_for_camif_hw_res(
 
 				if (!eof_status)
 					ife_hwr_mgr_ctx->eof_cnt[core_idx]++;
+				else
+					break;
 			}
 
 			/* EOF check for Right side VFE */
@@ -2941,6 +2960,8 @@ static int cam_ife_hw_mgr_handle_eof_for_camif_hw_res(
 
 				if (!eof_status)
 					ife_hwr_mgr_ctx->eof_cnt[core_idx]++;
+				else
+					break;
 			}
 
 			core_index0 = hw_res_l->hw_intf->hw_idx;
@@ -2955,7 +2976,7 @@ static int cam_ife_hw_mgr_handle_eof_for_camif_hw_res(
 			if (!rc)
 				ife_hwr_irq_eof_cb(
 					ife_hwr_mgr_ctx->common.cb_priv,
-					CAM_ISP_HW_EVENT_EPOCH,
+					CAM_ISP_HW_EVENT_EOF,
 					&eof_done_event_data);
 
 			break;
@@ -3015,6 +3036,8 @@ static int cam_ife_hw_mgr_handle_buf_done_for_hw_res(
 			hw_res_l->hw_intf->hw_idx))
 			buf_done_status = hw_res_l->bottom_half_handler(
 				hw_res_l, evt_payload);
+		else
+			continue;
 
 		switch (buf_done_status) {
 		case CAM_VFE_IRQ_STATUS_ERR_COMP:
@@ -3114,7 +3137,8 @@ int cam_ife_mgr_do_tasklet_buf_done(void *handler_priv,
 	evt_payload = evt_payload_priv;
 	ife_hwr_mgr_ctx = (struct cam_ife_hw_mgr_ctx *)evt_payload->ctx;
 
-	CAM_DBG(CAM_ISP, "addr of evt_payload = %llx", (uint64_t)evt_payload);
+	CAM_DBG(CAM_ISP, "addr of evt_payload = %llx core index:0x%x",
+		(uint64_t)evt_payload, evt_payload->core_index);
 	CAM_DBG(CAM_ISP, "bus_irq_status_0: = %x", evt_payload->irq_reg_val[0]);
 	CAM_DBG(CAM_ISP, "bus_irq_status_1: = %x", evt_payload->irq_reg_val[1]);
 	CAM_DBG(CAM_ISP, "bus_irq_status_2: = %x", evt_payload->irq_reg_val[2]);
@@ -3144,7 +3168,9 @@ int cam_ife_mgr_do_tasklet(void *handler_priv, void *evt_payload_priv)
 	evt_payload = evt_payload_priv;
 	ife_hwr_mgr_ctx = (struct cam_ife_hw_mgr_ctx *)handler_priv;
 
-	CAM_DBG(CAM_ISP, "addr of evt_payload = %llx", (uint64_t)evt_payload);
+	CAM_DBG(CAM_ISP, "addr of evt_payload = %pK core_index:%d",
+		(void *)evt_payload,
+		evt_payload->core_index);
 	CAM_DBG(CAM_ISP, "irq_status_0: = %x", evt_payload->irq_reg_val[0]);
 	CAM_DBG(CAM_ISP, "irq_status_1: = %x", evt_payload->irq_reg_val[1]);
 	CAM_DBG(CAM_ISP, "Violation register: = %x",
@@ -3215,6 +3241,51 @@ static int cam_ife_hw_mgr_sort_dev_with_caps(
 	return 0;
 }
 
+static int cam_ife_set_csid_debug(void *data, u64 val)
+{
+	g_ife_hw_mgr.debug_cfg.csid_debug = val;
+	CAM_DBG(CAM_ISP, "Set CSID Debug value :%lld", val);
+	return 0;
+}
+
+static int cam_ife_get_csid_debug(void *data, u64 *val)
+{
+	*val = g_ife_hw_mgr.debug_cfg.csid_debug;
+	CAM_DBG(CAM_ISP, "Get CSID Debug value :%lld",
+		g_ife_hw_mgr.debug_cfg.csid_debug);
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(cam_ife_csid_debug,
+	cam_ife_get_csid_debug,
+	cam_ife_set_csid_debug, "%16llu");
+
+static int cam_ife_hw_mgr_debug_register(void)
+{
+	g_ife_hw_mgr.debug_cfg.dentry = debugfs_create_dir("camera_ife",
+		NULL);
+
+	if (!g_ife_hw_mgr.debug_cfg.dentry) {
+		CAM_ERR(CAM_ISP, "failed to create dentry");
+		return -ENOMEM;
+	}
+
+	if (!debugfs_create_file("ife_csid_debug",
+		0644,
+		g_ife_hw_mgr.debug_cfg.dentry, NULL,
+		&cam_ife_csid_debug)) {
+		CAM_ERR(CAM_ISP, "failed to create cam_ife_csid_debug");
+		goto err;
+	}
+
+	return 0;
+
+err:
+	debugfs_remove_recursive(g_ife_hw_mgr.debug_cfg.dentry);
+	return -ENOMEM;
+}
+
 int cam_ife_hw_mgr_init(struct cam_hw_mgr_intf *hw_mgr_intf)
 {
 	int rc = -EFAULT;
@@ -3380,6 +3451,8 @@ int cam_ife_hw_mgr_init(struct cam_hw_mgr_intf *hw_mgr_intf)
 	hw_mgr_intf->hw_prepare_update = cam_ife_mgr_prepare_hw_update;
 	hw_mgr_intf->hw_config = cam_ife_mgr_config_hw;
 	hw_mgr_intf->hw_cmd = cam_ife_mgr_cmd;
+
+	cam_ife_hw_mgr_debug_register();
 	CAM_DBG(CAM_ISP, "Exit");
 
 	return 0;
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.h
index 750b43e..2e66210 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.h
@@ -81,6 +81,18 @@ struct ctx_base_info {
 };
 
 /**
+ * struct cam_ife_hw_mgr_debug - contain the debug information
+ *
+ * @dentry:              Debugfs entry
+ * @csid_debug:          csid debug information
+ *
+ */
+struct cam_ife_hw_mgr_debug {
+	struct dentry                  *dentry;
+	uint64_t                        csid_debug;
+};
+
+/**
  * struct cam_vfe_hw_mgr_ctx - IFE HW manager Context object
  *
  * @list:                   used by the ctx list.
@@ -142,7 +154,6 @@ struct cam_ife_hw_mgr_ctx {
 	uint32_t                        eof_cnt[CAM_IFE_HW_NUM_MAX];
 	atomic_t                        overflow_pending;
 	uint32_t                        is_rdi_only_context;
-
 };
 
 /**
@@ -176,6 +187,7 @@ struct cam_ife_hw_mgr {
 						CAM_IFE_CSID_HW_NUM_MAX];
 	struct cam_vfe_hw_get_hw_cap   ife_dev_caps[CAM_IFE_HW_NUM_MAX];
 	struct cam_req_mgr_core_workq  *workq;
+	struct cam_ife_hw_mgr_debug     debug_cfg;
 };
 
 /**
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_isp_packet_parser.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_isp_packet_parser.c
index c58578e..5f68f21 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_isp_packet_parser.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_isp_packet_parser.c
@@ -82,10 +82,66 @@ int cam_isp_add_change_base(
 	return rc;
 }
 
+static int cam_isp_update_dual_config(
+	struct cam_hw_prepare_update_args  *prepare,
+	struct cam_cmd_buf_desc            *cmd_desc,
+	uint32_t                            split_id,
+	uint32_t                            base_idx,
+	struct cam_ife_hw_mgr_res          *res_list_isp_out,
+	uint32_t                            size_isp_out)
+{
+	int rc = -EINVAL;
+	struct cam_isp_dual_config                 *dual_config;
+	struct cam_ife_hw_mgr_res                  *hw_mgr_res;
+	struct cam_isp_resource_node               *res;
+	struct cam_isp_hw_dual_isp_update_args      dual_isp_update_args;
+	size_t                                      len = 0;
+	uint32_t                                   *cpu_addr;
+	uint32_t                                    i, j;
+
+	CAM_DBG(CAM_UTIL, "cmd des size %d, length: %d",
+		cmd_desc->size, cmd_desc->length);
+
+	rc = cam_packet_util_get_cmd_mem_addr(
+		cmd_desc->mem_handle, &cpu_addr, &len);
+	if (rc)
+		return rc;
+
+	cpu_addr += (cmd_desc->offset / 4);
+	dual_config = (struct cam_isp_dual_config *)cpu_addr;
+
+	for (i = 0; i < dual_config->num_ports; i++) {
+		hw_mgr_res = &res_list_isp_out[i];
+		for (j = 0; j < CAM_ISP_HW_SPLIT_MAX; j++) {
+			if (!hw_mgr_res->hw_res[j])
+				continue;
+
+			if (hw_mgr_res->hw_res[j]->hw_intf->hw_idx != base_idx)
+				continue;
+
+			res = hw_mgr_res->hw_res[j];
+			dual_isp_update_args.split_id = j;
+			dual_isp_update_args.res      = res;
+			dual_isp_update_args.dual_cfg = dual_config;
+			rc = res->hw_intf->hw_ops.process_cmd(
+				res->hw_intf->hw_priv,
+				CAM_VFE_HW_CMD_STRIPE_UPDATE,
+				&dual_isp_update_args,
+				sizeof(struct cam_isp_hw_dual_isp_update_args));
+			if (rc)
+				return rc;
+		}
+	}
+
+	return rc;
+}
 
 int cam_isp_add_command_buffers(
 	struct cam_hw_prepare_update_args  *prepare,
-	uint32_t                            split_id)
+	enum cam_isp_hw_split_id            split_id,
+	uint32_t                            base_idx,
+	struct cam_ife_hw_mgr_res          *res_list_isp_out,
+	uint32_t                            size_isp_out)
 {
 	int rc = 0;
 	uint32_t  cmd_meta_data, num_ent, i;
@@ -168,6 +224,14 @@ int cam_isp_add_command_buffers(
 
 			num_ent++;
 			break;
+		case CAM_ISP_PACKET_META_DUAL_CONFIG:
+			rc = cam_isp_update_dual_config(prepare,
+				&cmd_desc[i], split_id, base_idx,
+				res_list_isp_out, size_isp_out);
+
+			if (rc)
+				return rc;
+			break;
 		case CAM_ISP_PACKET_META_GENERIC_BLOB:
 			break;
 		default:
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/include/cam_isp_packet_parser.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/include/cam_isp_packet_parser.h
index 24b532e..cce0071 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/include/cam_isp_packet_parser.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/include/cam_isp_packet_parser.h
@@ -54,14 +54,19 @@ int cam_isp_add_change_base(
  *                         left or right VFE/IFE instance.
  *
  * @prepare:               Contain the  packet and HW update variables
- * @dual_type:             Left of right command buffers to be extracted
- *
+ * @split_id:              Left or right command buffers to be extracted
+ * @base_idx:              Base or dev index of the IFE/VFE HW instance
+ * @res_list_isp_out:      IFE /VFE out resource list
+ * @size_isp_out:          Size of the res_list_isp_out array
  * @return:                0 for success
  *                         -EINVAL for Fail
  */
 int cam_isp_add_command_buffers(
-	struct cam_hw_prepare_update_args    *prepare,
-	enum cam_isp_hw_split_id              split_id);
+	struct cam_hw_prepare_update_args  *prepare,
+	enum cam_isp_hw_split_id            split_id,
+	uint32_t                            base_idx,
+	struct cam_ife_hw_mgr_res          *res_list_isp_out,
+	uint32_t                            size_isp_out);
 
 /**
  * @brief                  Add io buffer configurations in the HW entries list
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid170.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid170.h
index 8ff2a55..c68ddf7 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid170.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid170.h
@@ -199,7 +199,7 @@ static struct cam_ife_csid_csi2_rx_reg_offset
 	.csid_csi2_rx_captured_long_pkt_0_addr        = 0x130,
 	.csid_csi2_rx_captured_long_pkt_1_addr        = 0x134,
 	.csid_csi2_rx_captured_long_pkt_ftr_addr      = 0x138,
-	.csid_csi2_rx_captured_cphy_pkt_ftr_addr      = 0x13c,
+	.csid_csi2_rx_captured_cphy_pkt_hdr_addr      = 0x13c,
 	.csid_csi2_rx_lane0_misr_addr                 = 0x150,
 	.csid_csi2_rx_lane1_misr_addr                 = 0x154,
 	.csid_csi2_rx_lane2_misr_addr                 = 0x158,
@@ -213,6 +213,14 @@ static struct cam_ife_csid_csi2_rx_reg_offset
 	.csi2_irq_mask_all                            = 0xFFFFFFF,
 	.csi2_misr_enable_shift_val                   = 6,
 	.csi2_vc_mode_shift_val                       = 2,
+	.csi2_capture_long_pkt_en_shift               = 0,
+	.csi2_capture_short_pkt_en_shift              = 1,
+	.csi2_capture_cphy_pkt_en_shift               = 2,
+	.csi2_capture_long_pkt_dt_shift               = 4,
+	.csi2_capture_long_pkt_vc_shift               = 10,
+	.csi2_capture_short_pkt_vc_shift              = 15,
+	.csi2_capture_cphy_pkt_dt_shift               = 20,
+	.csi2_capture_cphy_pkt_vc_shift               = 26,
 };
 
 static struct cam_ife_csid_csi2_tpg_reg_offset
@@ -236,9 +244,10 @@ static struct cam_ife_csid_csi2_tpg_reg_offset
 	.csid_tpg_cgen_n_y1_addr                      = 0x664,
 	.csid_tpg_cgen_n_y2_addr                      = 0x668,
 
-	/*configurations */
+	/* configurations */
 	.tpg_dtn_cfg_offset                           = 0xc,
 	.tpg_cgen_cfg_offset                          = 0x20,
+	.tpg_cpas_ife_reg_offset                      = 0x28,
 };
 
 static struct cam_ife_csid_common_reg_offset
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c
index 9103136..7d6e758 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c
@@ -32,8 +32,6 @@
 #define CAM_IFE_CSID_TIMEOUT_SLEEP_US                  1000
 #define CAM_IFE_CSID_TIMEOUT_ALL_US                    1000000
 
-#define MEASURE_EN                                     0
-
 static int cam_ife_csid_is_ipp_format_supported(
 	uint32_t in_format)
 {
@@ -85,6 +83,7 @@ static int cam_ife_csid_get_format_rdi(
 	case CAM_FORMAT_MIPI_RAW_8:
 		switch (out_format) {
 		case CAM_FORMAT_MIPI_RAW_8:
+		case CAM_FORMAT_PLAIN128:
 			*decode_fmt = 0xf;
 			break;
 		case CAM_FORMAT_PLAIN8:
@@ -99,6 +98,7 @@ static int cam_ife_csid_get_format_rdi(
 	case CAM_FORMAT_MIPI_RAW_10:
 		switch (out_format) {
 		case CAM_FORMAT_MIPI_RAW_10:
+		case CAM_FORMAT_PLAIN128:
 			*decode_fmt = 0xf;
 			break;
 		case CAM_FORMAT_PLAIN16_10:
@@ -196,7 +196,7 @@ static int cam_ife_csid_get_format_rdi(
 	}
 
 	if (rc)
-		CAM_ERR(CAM_ISP, "Unsupported format pair in %d out %d\n",
+		CAM_ERR(CAM_ISP, "Unsupported format pair in %d out %d",
 			in_format, out_format);
 
 	return rc;
@@ -562,7 +562,7 @@ static int cam_ife_csid_cid_reserve(struct cam_ife_csid_hw *csid_hw,
 	struct cam_ife_csid_cid_data       *cid_data;
 
 	CAM_DBG(CAM_ISP,
-		"CSID:%d res_sel:%d Lane type:%d lane_num:%d dt:%d vc:%d",
+		"CSID:%d res_sel:0x%x Lane type:%d lane_num:%d dt:%d vc:%d",
 		csid_hw->hw_intf->hw_idx,
 		cid_reserv->in_port->res_type,
 		cid_reserv->in_port->lane_type,
@@ -683,11 +683,24 @@ static int cam_ife_csid_cid_reserve(struct cam_ife_csid_hw *csid_hw,
 			}
 			csid_hw->tpg_cfg.in_format =
 				cid_reserv->in_port->format;
-			csid_hw->tpg_cfg.width =
-				cid_reserv->in_port->left_width;
+			csid_hw->tpg_cfg.usage_type =
+				cid_reserv->in_port->usage_type;
+			if (cid_reserv->in_port->usage_type)
+				csid_hw->tpg_cfg.width =
+					(cid_reserv->in_port->right_stop + 1);
+			else
+				csid_hw->tpg_cfg.width =
+					cid_reserv->in_port->left_width;
+
 			csid_hw->tpg_cfg.height = cid_reserv->in_port->height;
 			csid_hw->tpg_cfg.test_pattern =
 				cid_reserv->in_port->test_pattern;
+
+			CAM_DBG(CAM_ISP, "CSID:%d TPG width:%d height:%d",
+				csid_hw->hw_intf->hw_idx,
+				csid_hw->tpg_cfg.width,
+				csid_hw->tpg_cfg.height);
+
 			cid_data->tpg_set = 1;
 		} else {
 			csid_hw->csi2_rx_cfg.phy_sel =
@@ -815,6 +828,7 @@ static int cam_ife_csid_path_reserve(struct cam_ife_csid_hw *csid_hw,
 	path_data->sync_mode = reserve->sync_mode;
 	path_data->height  = reserve->in_port->height;
 	path_data->start_line = reserve->in_port->line_start;
+	path_data->end_line = reserve->in_port->line_stop;
 	if (reserve->in_port->res_type == CAM_ISP_IFE_IN_RES_TPG) {
 		path_data->dt = CAM_IFE_CSID_TPG_DT_VAL;
 		path_data->vc = CAM_IFE_CSID_TPG_VC_VAL;
@@ -826,18 +840,32 @@ static int cam_ife_csid_path_reserve(struct cam_ife_csid_hw *csid_hw,
 	if (reserve->sync_mode == CAM_ISP_HW_SYNC_MASTER) {
 		path_data->crop_enable = 1;
 		path_data->start_pixel = reserve->in_port->left_start;
+		path_data->end_pixel = reserve->in_port->left_stop;
 		path_data->width  = reserve->in_port->left_width;
+		CAM_DBG(CAM_ISP, "CSID:%dmaster:startpixel 0x%x endpixel:0x%x",
+			csid_hw->hw_intf->hw_idx, path_data->start_pixel,
+			path_data->end_pixel);
+		CAM_DBG(CAM_ISP, "CSID:%dmaster:line start:0x%x line end:0x%x",
+			csid_hw->hw_intf->hw_idx, path_data->start_line,
+			path_data->end_line);
 	} else if (reserve->sync_mode == CAM_ISP_HW_SYNC_SLAVE) {
 		path_data->crop_enable = 1;
 		path_data->start_pixel = reserve->in_port->right_start;
+		path_data->end_pixel = reserve->in_port->right_stop;
 		path_data->width  = reserve->in_port->right_width;
+		CAM_DBG(CAM_ISP, "CSID:%d slave:start:0x%x end:0x%x width 0x%x",
+			csid_hw->hw_intf->hw_idx, path_data->start_pixel,
+			path_data->end_pixel, path_data->width);
+		CAM_DBG(CAM_ISP, "CSID:%dmaster:line start:0x%x line end:0x%x",
+			csid_hw->hw_intf->hw_idx, path_data->start_line,
+			path_data->end_line);
 	} else {
 		path_data->crop_enable = 0;
 		path_data->width  = reserve->in_port->left_width;
 		path_data->start_pixel = reserve->in_port->left_start;
 	}
 
-	CAM_DBG(CAM_ISP, "Res %d width %d height %d\n", reserve->res_id,
+	CAM_DBG(CAM_ISP, "Res %d width %d height %d", reserve->res_id,
 		path_data->width, path_data->height);
 	reserve->node_res = res;
 
@@ -994,6 +1022,7 @@ static int cam_ife_csid_disable_hw(struct cam_ife_csid_hw *csid_hw)
 static int cam_ife_csid_tpg_start(struct cam_ife_csid_hw   *csid_hw,
 	struct cam_isp_resource_node       *res)
 {
+	int rc = 0;
 	uint32_t  val = 0;
 	struct cam_hw_soc_info    *soc_info;
 
@@ -1039,6 +1068,15 @@ static int cam_ife_csid_tpg_start(struct cam_ife_csid_hw   *csid_hw,
 			}
 		}
 
+		/* Enable the IFE force clock on for dual isp case */
+		if (csid_hw->tpg_cfg.usage_type) {
+			rc = cam_ife_csid_enable_ife_force_clock_on(soc_info,
+				csid_hw->csid_info->csid_reg->tpg_reg->
+				tpg_cpas_ife_reg_offset);
+			if (rc)
+				return rc;
+		}
+
 		CAM_DBG(CAM_ISP, "============ TPG control ============");
 		val = (4 << 20);
 		val |= (0x80 << 8);
@@ -1058,6 +1096,7 @@ static int cam_ife_csid_tpg_start(struct cam_ife_csid_hw   *csid_hw,
 static int cam_ife_csid_tpg_stop(struct cam_ife_csid_hw   *csid_hw,
 	struct cam_isp_resource_node       *res)
 {
+	int rc = 0;
 	struct cam_hw_soc_info    *soc_info;
 
 	if (csid_hw->tpg_start_cnt)
@@ -1073,6 +1112,12 @@ static int cam_ife_csid_tpg_stop(struct cam_ife_csid_hw   *csid_hw,
 		CAM_DBG(CAM_ISP, "CSID:%d stop CSID TPG",
 			csid_hw->hw_intf->hw_idx);
 
+		/* Disable the IFE force clock on for dual isp case */
+		if (csid_hw->tpg_cfg.usage_type)
+			rc = cam_ife_csid_disable_ife_force_clock_on(soc_info,
+				csid_hw->csid_info->csid_reg->tpg_reg->
+				tpg_cpas_ife_reg_offset);
+
 		/*stop the TPG */
 		cam_io_w_mb(0,  soc_info->reg_map[0].mem_base +
 		csid_hw->csid_info->csid_reg->tpg_reg->csid_tpg_ctrl_addr);
@@ -1100,8 +1145,8 @@ static int cam_ife_csid_config_tpg(struct cam_ife_csid_hw   *csid_hw,
 	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
 			csid_reg->tpg_reg->csid_tpg_vc_cfg0_addr);
 
-	/* vertical blanking count = 0x740, horzontal blanking count = 0x740*/
-	val = (0x740 << 12) | 0x740;
+	/* vertical blanking count = 0x3FF, horizontal blanking count = 0x740 */
+	val = (0x3FF << 12) | 0x740;
 	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
 			csid_reg->tpg_reg->csid_tpg_vc_cfg1_addr);
 
@@ -1203,6 +1248,28 @@ static int cam_ife_csid_enable_csi2(
 		CSID_CSI2_RX_ERROR_CPHY_EOT_RECEPTION |
 		CSID_CSI2_RX_ERROR_CPHY_SOT_RECEPTION |
 		CSID_CSI2_RX_ERROR_CPHY_PH_CRC;
+
+	/* Enable the interrupt based on csid debug info set */
+	if (csid_hw->csid_debug & CSID_DEBUG_ENABLE_SOT_IRQ)
+		val |= CSID_CSI2_RX_INFO_PHY_DL0_SOT_CAPTURED |
+			CSID_CSI2_RX_INFO_PHY_DL1_SOT_CAPTURED |
+			CSID_CSI2_RX_INFO_PHY_DL2_SOT_CAPTURED |
+			CSID_CSI2_RX_INFO_PHY_DL3_SOT_CAPTURED;
+
+	if (csid_hw->csid_debug & CSID_DEBUG_ENABLE_EOT_IRQ)
+		val |= CSID_CSI2_RX_INFO_PHY_DL0_EOT_CAPTURED |
+			CSID_CSI2_RX_INFO_PHY_DL1_EOT_CAPTURED |
+			CSID_CSI2_RX_INFO_PHY_DL2_EOT_CAPTURED |
+			CSID_CSI2_RX_INFO_PHY_DL3_EOT_CAPTURED;
+
+	if (csid_hw->csid_debug & CSID_DEBUG_ENABLE_SHORT_PKT_CAPTURE)
+		val |= CSID_CSI2_RX_INFO_SHORT_PKT_CAPTURED;
+
+	if (csid_hw->csid_debug & CSID_DEBUG_ENABLE_LONG_PKT_CAPTURE)
+		val |= CSID_CSI2_RX_INFO_LONG_PKT_CAPTURED;
+	if (csid_hw->csid_debug & CSID_DEBUG_ENABLE_CPHY_PKT_CAPTURE)
+		val |= CSID_CSI2_RX_INFO_CPHY_PKT_HDR_CAPTURED;
+
 	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
 		csid_reg->csi2_reg->csid_csi2_rx_irq_mask_addr);
 
@@ -1277,9 +1344,9 @@ static int cam_ife_csid_init_config_ipp_path(
 		(path_data->dt << csid_reg->cmn_reg->dt_shift_val) |
 		(path_data->cid << csid_reg->cmn_reg->dt_id_shift_val) |
 		(decode_format << csid_reg->cmn_reg->fmt_shift_val) |
-		(path_data->crop_enable & 1 <<
+		(path_data->crop_enable <<
 		csid_reg->cmn_reg->crop_h_en_shift_val) |
-		(path_data->crop_enable & 1 <<
+		(path_data->crop_enable <<
 		csid_reg->cmn_reg->crop_v_en_shift_val) |
 		(1 << 1) | 1;
 	val |= (1 << csid_reg->ipp_reg->pix_store_en_shift_val);
@@ -1291,21 +1358,21 @@ static int cam_ife_csid_init_config_ipp_path(
 		csid_reg->ipp_reg->csid_ipp_cfg1_addr);
 
 	if (path_data->crop_enable) {
-		val = ((path_data->width +
-			path_data->start_pixel) & 0xFFFF <<
+		val = (((path_data->end_pixel & 0xFFFF) <<
 			csid_reg->cmn_reg->crop_shift) |
-			(path_data->start_pixel & 0xFFFF);
-
+			(path_data->start_pixel & 0xFFFF));
 		cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
 			csid_reg->ipp_reg->csid_ipp_hcrop_addr);
+		CAM_DBG(CAM_ISP, "CSID:%d Horizontal crop config val: 0x%x",
+			csid_hw->hw_intf->hw_idx, val);
 
-		val = ((path_data->height +
-			path_data->start_line) & 0xFFFF <<
+		val = (((path_data->end_line & 0xFFFF) <<
 			csid_reg->cmn_reg->crop_shift) |
-			(path_data->start_line & 0xFFFF);
-
+			(path_data->start_line & 0xFFFF));
 		cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
 			csid_reg->ipp_reg->csid_ipp_vcrop_addr);
+		CAM_DBG(CAM_ISP, "CSID:%d Vertical Crop config val: 0x%x",
+			csid_hw->hw_intf->hw_idx, val);
 	}
 
 	/* set frame drop pattern to 0 and period to 1 */
@@ -1350,6 +1417,34 @@ static int cam_ife_csid_init_config_ipp_path(
 	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
 		csid_reg->ipp_reg->csid_ipp_cfg0_addr);
 
+	/* configure the rx packet capture based on csid debug set */
+	val = 0;
+	if (csid_hw->csid_debug & CSID_DEBUG_ENABLE_SHORT_PKT_CAPTURE)
+		val = ((1 <<
+			csid_reg->csi2_reg->csi2_capture_short_pkt_en_shift) |
+			(path_data->vc <<
+			csid_reg->csi2_reg->csi2_capture_short_pkt_vc_shift));
+
+	if (csid_hw->csid_debug & CSID_DEBUG_ENABLE_LONG_PKT_CAPTURE)
+		val |= ((1 <<
+			csid_reg->csi2_reg->csi2_capture_long_pkt_en_shift) |
+			(path_data->dt <<
+			csid_reg->csi2_reg->csi2_capture_long_pkt_dt_shift) |
+			(path_data->vc <<
+			csid_reg->csi2_reg->csi2_capture_long_pkt_vc_shift));
+
+	if (csid_hw->csid_debug & CSID_DEBUG_ENABLE_CPHY_PKT_CAPTURE)
+		val |= ((1 <<
+			csid_reg->csi2_reg->csi2_capture_cphy_pkt_en_shift) |
+			(path_data->dt <<
+			csid_reg->csi2_reg->csi2_capture_cphy_pkt_dt_shift) |
+			(path_data->vc <<
+			csid_reg->csi2_reg->csi2_capture_cphy_pkt_vc_shift));
+
+	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+		csid_reg->csi2_reg->csid_csi2_rx_capture_ctrl_addr);
+	CAM_DBG(CAM_ISP, "rx capture control value 0x%x", val);
+
 	res->res_state = CAM_ISP_RESOURCE_STATE_INIT_HW;
 
 	return rc;
@@ -1439,6 +1534,12 @@ static int cam_ife_csid_enable_ipp_path(
 
 	/* Enable the required ipp interrupts */
 	val = CSID_PATH_INFO_RST_DONE | CSID_PATH_ERROR_FIFO_OVERFLOW;
+
+	if (csid_hw->csid_debug & CSID_DEBUG_ENABLE_SOF_IRQ)
+		val |= CSID_PATH_INFO_INPUT_SOF;
+	if (csid_hw->csid_debug & CSID_DEBUG_ENABLE_EOF_IRQ)
+		val |= CSID_PATH_INFO_INPUT_EOF;
+
 	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
 		csid_reg->ipp_reg->csid_ipp_irq_mask_addr);
 
@@ -1567,9 +1668,9 @@ static int cam_ife_csid_init_config_rdi_path(
 		(path_data->cid << csid_reg->cmn_reg->dt_id_shift_val) |
 		(path_format << csid_reg->cmn_reg->fmt_shift_val) |
 		(plain_fmt << csid_reg->cmn_reg->plain_fmt_shit_val) |
-		(path_data->crop_enable & 1 <<
+		(path_data->crop_enable  <<
 			csid_reg->cmn_reg->crop_h_en_shift_val) |
-		(path_data->crop_enable & 1 <<
+		(path_data->crop_enable  <<
 		csid_reg->cmn_reg->crop_v_en_shift_val) |
 		(1 << 2) | 3;
 
@@ -1581,21 +1682,23 @@ static int cam_ife_csid_init_config_rdi_path(
 			csid_reg->rdi_reg[id]->csid_rdi_cfg1_addr);
 
 	if (path_data->crop_enable) {
-		val = ((path_data->width +
-			path_data->start_pixel) & 0xFFFF <<
+		val = (((path_data->end_pixel & 0xFFFF) <<
 			csid_reg->cmn_reg->crop_shift) |
-			(path_data->start_pixel & 0xFFFF);
+			(path_data->start_pixel & 0xFFFF));
 
 		cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
 			csid_reg->rdi_reg[id]->csid_rdi_rpp_hcrop_addr);
+		CAM_DBG(CAM_ISP, "CSID:%d Horizontal crop config val: 0x%x",
+			csid_hw->hw_intf->hw_idx, val);
 
-		val = ((path_data->height +
-			path_data->start_line) & 0xFFFF <<
+		val = (((path_data->end_line & 0xFFFF) <<
 			csid_reg->cmn_reg->crop_shift) |
-			(path_data->start_line & 0xFFFF);
+			(path_data->start_line & 0xFFFF));
 
 		cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
 			csid_reg->rdi_reg[id]->csid_rdi_rpp_vcrop_addr);
+		CAM_DBG(CAM_ISP, "CSID:%d Vertical Crop config val: 0x%x",
+			csid_hw->hw_intf->hw_idx, val);
 	}
 	/* set frame drop pattern to 0 and period to 1 */
 	cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
@@ -1627,21 +1730,35 @@ static int cam_ife_csid_init_config_rdi_path(
 	val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
 		csid_reg->rdi_reg[id]->csid_rdi_cfg0_addr);
 	val |= (1 << csid_reg->cmn_reg->path_en_shift_val);
-#if MEASURE_EN
-	val |= 0x2;
-	cam_io_w_mb(0x3, soc_info->reg_map[0].mem_base +
-		csid_reg->rdi_reg[id]->csid_rdi_format_measure_cfg0_addr);
-	cam_io_w_mb(path_data->height << 16 | path_data->width,
-		soc_info->reg_map[0].mem_base +
-		csid_reg->rdi_reg[id]->csid_rdi_format_measure_cfg1_addr);
-	CAM_DBG(CAM_ISP, "measure_cfg1 0x%x offset 0x%x\n",
-		path_data->height << 16 | path_data->width,
-		csid_reg->rdi_reg[id]->csid_rdi_format_measure_cfg0_addr);
 
-#endif
 	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
 		csid_reg->rdi_reg[id]->csid_rdi_cfg0_addr);
 
+	/* configure the rx packet capture based on csid debug set */
+	if (csid_hw->csid_debug & CSID_DEBUG_ENABLE_SHORT_PKT_CAPTURE)
+		val = ((1 <<
+			csid_reg->csi2_reg->csi2_capture_short_pkt_en_shift) |
+			(path_data->vc <<
+			csid_reg->csi2_reg->csi2_capture_short_pkt_vc_shift));
+
+	if (csid_hw->csid_debug & CSID_DEBUG_ENABLE_LONG_PKT_CAPTURE)
+		val |= ((1 <<
+			csid_reg->csi2_reg->csi2_capture_long_pkt_en_shift) |
+			(path_data->dt <<
+			csid_reg->csi2_reg->csi2_capture_long_pkt_dt_shift) |
+			(path_data->vc <<
+			csid_reg->csi2_reg->csi2_capture_long_pkt_vc_shift));
+
+	if (csid_hw->csid_debug & CSID_DEBUG_ENABLE_CPHY_PKT_CAPTURE)
+		val |= ((1 <<
+			csid_reg->csi2_reg->csi2_capture_cphy_pkt_en_shift) |
+			(path_data->dt <<
+			csid_reg->csi2_reg->csi2_capture_cphy_pkt_dt_shift) |
+			(path_data->vc <<
+			csid_reg->csi2_reg->csi2_capture_cphy_pkt_vc_shift));
+	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+		csid_reg->csi2_reg->csid_csi2_rx_capture_ctrl_addr);
+
 	res->res_state = CAM_ISP_RESOURCE_STATE_INIT_HW;
 
 	return rc;
@@ -1708,11 +1825,13 @@ static int cam_ife_csid_enable_rdi_path(
 			csid_reg->rdi_reg[id]->csid_rdi_ctrl_addr);
 
 	/* Enable the required RDI interrupts */
-	val = CSID_PATH_INFO_RST_DONE |
-#if MEASURE_EN
-		CSID_PATH_INFO_INPUT_SOF |
-#endif
-		CSID_PATH_ERROR_FIFO_OVERFLOW;
+	val = CSID_PATH_INFO_RST_DONE | CSID_PATH_ERROR_FIFO_OVERFLOW;
+
+	if (csid_hw->csid_debug & CSID_DEBUG_ENABLE_SOF_IRQ)
+		val |= CSID_PATH_INFO_INPUT_SOF;
+	if (csid_hw->csid_debug & CSID_DEBUG_ENABLE_EOF_IRQ)
+		val |= CSID_PATH_INFO_INPUT_EOF;
+
 	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
 		csid_reg->rdi_reg[id]->csid_rdi_irq_mask_addr);
 
@@ -1844,6 +1963,20 @@ static int cam_ife_csid_get_time_stamp(
 
 	return 0;
 }
+
+static int cam_ife_csid_set_csid_debug(struct cam_ife_csid_hw   *csid_hw,
+	void *cmd_args)
+{
+	uint32_t  *csid_debug;
+
+	csid_debug = (uint32_t  *) cmd_args;
+	csid_hw->csid_debug = *csid_debug;
+	CAM_DBG(CAM_ISP, "CSID:%d set csid debug value:%d",
+		csid_hw->hw_intf->hw_idx, csid_hw->csid_debug);
+
+	return 0;
+}
+
 static int cam_ife_csid_res_wait_for_halt(
 	struct cam_ife_csid_hw          *csid_hw,
 	struct cam_isp_resource_node    *res)
@@ -2387,6 +2520,9 @@ static int cam_ife_csid_process_cmd(void *hw_priv,
 	case CAM_IFE_CSID_CMD_GET_TIME_STAMP:
 		rc = cam_ife_csid_get_time_stamp(csid_hw, cmd_args);
 		break;
+	case CAM_IFE_CSID_SET_CSID_DEBUG:
+		rc = cam_ife_csid_set_csid_debug(csid_hw, cmd_args);
+		break;
 	default:
 		CAM_ERR(CAM_ISP, "CSID:%d un supported cmd:%d",
 			csid_hw->hw_intf->hw_idx, cmd_type);
@@ -2405,9 +2541,7 @@ irqreturn_t cam_ife_csid_irq(int irq_num, void *data)
 	struct cam_ife_csid_reg_offset  *csid_reg;
 	uint32_t i, irq_status_top, irq_status_rx, irq_status_ipp = 0,
 		irq_status_rdi[4];
-#if MEASURE_EN
 	uint32_t val;
-#endif
 
 	csid_hw = (struct cam_ife_csid_hw *)data;
 
@@ -2501,6 +2635,93 @@ irqreturn_t cam_ife_csid_irq(int irq_num, void *data)
 			 csid_hw->hw_intf->hw_idx);
 	}
 
+	if (csid_hw->csid_debug & CSID_DEBUG_ENABLE_EOT_IRQ) {
+		if (irq_status_rx & CSID_CSI2_RX_INFO_PHY_DL0_EOT_CAPTURED) {
+			CAM_ERR(CAM_ISP, "CSID:%d PHY_DL0_EOT_CAPTURED",
+				csid_hw->hw_intf->hw_idx);
+		}
+		if (irq_status_rx & CSID_CSI2_RX_INFO_PHY_DL1_EOT_CAPTURED) {
+			CAM_ERR(CAM_ISP, "CSID:%d PHY_DL1_EOT_CAPTURED",
+				csid_hw->hw_intf->hw_idx);
+		}
+		if (irq_status_rx & CSID_CSI2_RX_INFO_PHY_DL2_EOT_CAPTURED) {
+			CAM_ERR(CAM_ISP, "CSID:%d PHY_DL2_EOT_CAPTURED",
+				csid_hw->hw_intf->hw_idx);
+		}
+		if (irq_status_rx & CSID_CSI2_RX_INFO_PHY_DL3_EOT_CAPTURED) {
+			CAM_ERR(CAM_ISP, "CSID:%d PHY_DL3_EOT_CAPTURED",
+				csid_hw->hw_intf->hw_idx);
+		}
+	}
+
+	if (csid_hw->csid_debug & CSID_DEBUG_ENABLE_SOT_IRQ) {
+		if (irq_status_rx & CSID_CSI2_RX_INFO_PHY_DL0_SOT_CAPTURED) {
+			CAM_ERR(CAM_ISP, "CSID:%d PHY_DL0_SOT_CAPTURED",
+				csid_hw->hw_intf->hw_idx);
+		}
+		if (irq_status_rx & CSID_CSI2_RX_INFO_PHY_DL1_SOT_CAPTURED) {
+			CAM_ERR(CAM_ISP, "CSID:%d PHY_DL1_SOT_CAPTURED",
+				csid_hw->hw_intf->hw_idx);
+		}
+		if (irq_status_rx & CSID_CSI2_RX_INFO_PHY_DL2_SOT_CAPTURED) {
+			CAM_ERR(CAM_ISP, "CSID:%d PHY_DL2_SOT_CAPTURED",
+				csid_hw->hw_intf->hw_idx);
+		}
+		if (irq_status_rx & CSID_CSI2_RX_INFO_PHY_DL3_SOT_CAPTURED) {
+			CAM_ERR(CAM_ISP, "CSID:%d PHY_DL3_SOT_CAPTURED",
+				csid_hw->hw_intf->hw_idx);
+		}
+	}
+
+	if ((csid_hw->csid_debug & CSID_DEBUG_ENABLE_LONG_PKT_CAPTURE) &&
+		(irq_status_rx & CSID_CSI2_RX_INFO_LONG_PKT_CAPTURED)) {
+		CAM_ERR(CAM_ISP, "CSID:%d LONG_PKT_CAPTURED",
+			csid_hw->hw_intf->hw_idx);
+		val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			csid_reg->csi2_reg->
+			csid_csi2_rx_captured_long_pkt_0_addr);
+		CAM_ERR(CAM_ISP, "CSID:%d long packet VC :%d DT:%d WC:%d",
+			csid_hw->hw_intf->hw_idx,
+			(val >> 22), ((val >> 16) & 0x3F), (val & 0xFFFF));
+		val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			csid_reg->csi2_reg->
+			csid_csi2_rx_captured_long_pkt_1_addr);
+		CAM_ERR(CAM_ISP, "CSID:%d long packet ECC :%d",
+			csid_hw->hw_intf->hw_idx, val);
+		val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			csid_reg->csi2_reg->
+			csid_csi2_rx_captured_long_pkt_ftr_addr);
+		CAM_ERR(CAM_ISP, "CSID:%d long pkt cal CRC:%d expected CRC:%d",
+			csid_hw->hw_intf->hw_idx, (val >> 16), (val & 0xFFFF));
+	}
+	if ((csid_hw->csid_debug & CSID_DEBUG_ENABLE_SHORT_PKT_CAPTURE) &&
+		(irq_status_rx & CSID_CSI2_RX_INFO_SHORT_PKT_CAPTURED)) {
+		CAM_ERR(CAM_ISP, "CSID:%d SHORT_PKT_CAPTURED",
+			csid_hw->hw_intf->hw_idx);
+		val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			csid_reg->csi2_reg->
+			csid_csi2_rx_captured_short_pkt_0_addr);
+		CAM_ERR(CAM_ISP, "CSID:%d short pkt VC :%d DT:%d LC:%d",
+			csid_hw->hw_intf->hw_idx,
+			(val >> 22), ((val >> 16) & 0x1F), (val & 0xFFFF));
+		val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			csid_reg->csi2_reg->
+			csid_csi2_rx_captured_short_pkt_1_addr);
+		CAM_ERR(CAM_ISP, "CSID:%d short packet ECC :%d", val);
+	}
+
+	if ((csid_hw->csid_debug & CSID_DEBUG_ENABLE_CPHY_PKT_CAPTURE) &&
+		(irq_status_rx & CSID_CSI2_RX_INFO_CPHY_PKT_HDR_CAPTURED)) {
+		CAM_ERR(CAM_ISP, "CSID:%d CPHY_PKT_HDR_CAPTURED",
+			csid_hw->hw_intf->hw_idx);
+		val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			csid_reg->csi2_reg->
+			csid_csi2_rx_captured_cphy_pkt_hdr_addr);
+		CAM_ERR(CAM_ISP, "CSID:%d cphy packet VC :%d DT:%d WC:%d",
+			csid_hw->hw_intf->hw_idx,
+			(val >> 22), ((val >> 16) & 0x1F), (val & 0xFFFF));
+	}
+
 	/*read the IPP errors */
 	if (csid_reg->cmn_reg->no_pix) {
 		/* IPP reset done bit */
@@ -2509,14 +2730,16 @@ irqreturn_t cam_ife_csid_irq(int irq_num, void *data)
 			CAM_DBG(CAM_ISP, "CSID IPP reset complete");
 			complete(&csid_hw->csid_ipp_complete);
 		}
-		if (irq_status_ipp & CSID_PATH_INFO_INPUT_SOF)
-			CAM_DBG(CAM_ISP, "CSID IPP SOF received");
-		if (irq_status_ipp & CSID_PATH_INFO_INPUT_SOL)
-			CAM_DBG(CAM_ISP, "CSID IPP SOL received");
-		if (irq_status_ipp & CSID_PATH_INFO_INPUT_EOL)
-			CAM_DBG(CAM_ISP, "CSID IPP EOL received");
-		if (irq_status_ipp & CSID_PATH_INFO_INPUT_EOF)
-			CAM_DBG(CAM_ISP, "CSID IPP EOF received");
+
+		if ((irq_status_ipp & CSID_PATH_INFO_INPUT_SOF) &&
+			(csid_hw->csid_debug & CSID_DEBUG_ENABLE_SOF_IRQ))
+			CAM_ERR(CAM_ISP, "CSID:%d IPP SOF received",
+				csid_hw->hw_intf->hw_idx);
+
+		if ((irq_status_ipp & CSID_PATH_INFO_INPUT_EOF) &&
+			(csid_hw->csid_debug & CSID_DEBUG_ENABLE_EOF_IRQ))
+			CAM_ERR(CAM_ISP, "CSID:%d IPP EOF received",
+				csid_hw->hw_intf->hw_idx);
 
 		if (irq_status_ipp & CSID_PATH_INFO_INPUT_EOF)
 			complete(&csid_hw->csid_ipp_complete);
@@ -2534,21 +2757,17 @@ irqreturn_t cam_ife_csid_irq(int irq_num, void *data)
 	for (i = 0; i < csid_reg->cmn_reg->no_rdis; i++) {
 		if (irq_status_rdi[i] &
 			BIT(csid_reg->cmn_reg->path_rst_done_shift_val)) {
-			CAM_DBG(CAM_ISP, "CSID rdi%d reset complete", i);
+			CAM_DBG(CAM_ISP, "CSID RDI%d reset complete", i);
 			complete(&csid_hw->csid_rdin_complete[i]);
 		}
 
-		if (irq_status_rdi[i]  & CSID_PATH_INFO_INPUT_SOF) {
-			CAM_DBG(CAM_ISP, "CSID RDI SOF received");
-#if MEASURE_EN
-			val = cam_io_r(soc_info->reg_map[0].mem_base +
-				csid_reg->rdi_reg[i]->
-				csid_rdi_format_measure0_addr);
-			CAM_ERR(CAM_ISP, "measure 0x%x\n", val);
-#endif
-		}
-		if (irq_status_rdi[i]  & CSID_PATH_INFO_INPUT_EOF)
-			CAM_DBG(CAM_ISP, "CSID RDI EOF received");
+		if ((irq_status_rdi[i] & CSID_PATH_INFO_INPUT_SOF) &&
+			(csid_hw->csid_debug & CSID_DEBUG_ENABLE_SOF_IRQ))
+			CAM_ERR(CAM_ISP, "CSID RDI:%d SOF received", i);
+
+		if ((irq_status_rdi[i]  & CSID_PATH_INFO_INPUT_EOF) &&
+			(csid_hw->csid_debug & CSID_DEBUG_ENABLE_EOF_IRQ))
+			CAM_ERR(CAM_ISP, "CSID RDI:%d EOF received", i);
 
 		if (irq_status_rdi[i] & CSID_PATH_INFO_INPUT_EOF)
 			complete(&csid_hw->csid_rdin_complete[i]);
@@ -2676,6 +2895,7 @@ int cam_ife_csid_hw_probe_init(struct cam_hw_intf  *csid_hw_intf,
 		ife_csid_hw->rdi_res[i].res_priv = path_data;
 	}
 
+	ife_csid_hw->csid_debug = 0;
 	return 0;
 err:
 	if (rc) {
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.h
index d5f032f..deef41f 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.h
@@ -22,7 +22,7 @@
 #define CAM_IFE_CSID_RDI_MAX         4
 
 #define CSID_CSI2_RX_INFO_PHY_DL0_EOT_CAPTURED    BIT(0)
-#define CSID_CSI2_RX_NFO_PHY_DL1_EOT_CAPTURED     BIT(1)
+#define CSID_CSI2_RX_INFO_PHY_DL1_EOT_CAPTURED    BIT(1)
 #define CSID_CSI2_RX_INFO_PHY_DL2_EOT_CAPTURED    BIT(2)
 #define CSID_CSI2_RX_INFO_PHY_DL3_EOT_CAPTURED    BIT(3)
 #define CSID_CSI2_RX_INFO_PHY_DL0_SOT_CAPTURED    BIT(4)
@@ -65,6 +65,18 @@
 #define CSID_PATH_ERROR_PIX_COUNT                 BIT(13)
 #define CSID_PATH_ERROR_LINE_COUNT                BIT(14)
 
+/*
+ * Debug values enable the corresponding interrupts and debug logs provide
+ * necessary information
+ */
+#define CSID_DEBUG_ENABLE_SOF_IRQ                 BIT(0)
+#define CSID_DEBUG_ENABLE_EOF_IRQ                 BIT(1)
+#define CSID_DEBUG_ENABLE_SOT_IRQ                 BIT(2)
+#define CSID_DEBUG_ENABLE_EOT_IRQ                 BIT(3)
+#define CSID_DEBUG_ENABLE_SHORT_PKT_CAPTURE       BIT(4)
+#define CSID_DEBUG_ENABLE_LONG_PKT_CAPTURE        BIT(5)
+#define CSID_DEBUG_ENABLE_CPHY_PKT_CAPTURE        BIT(6)
+
 /* enum cam_csid_path_halt_mode select the path halt mode control */
 enum cam_csid_path_halt_mode {
 	CSID_HALT_MODE_INTERNAL,
@@ -187,7 +199,7 @@ struct cam_ife_csid_csi2_rx_reg_offset {
 	uint32_t csid_csi2_rx_captured_long_pkt_0_addr;
 	uint32_t csid_csi2_rx_captured_long_pkt_1_addr;
 	uint32_t csid_csi2_rx_captured_long_pkt_ftr_addr;
-	uint32_t csid_csi2_rx_captured_cphy_pkt_ftr_addr;
+	uint32_t csid_csi2_rx_captured_cphy_pkt_hdr_addr;
 	uint32_t csid_csi2_rx_lane0_misr_addr;
 	uint32_t csid_csi2_rx_lane1_misr_addr;
 	uint32_t csid_csi2_rx_lane2_misr_addr;
@@ -202,6 +214,14 @@ struct cam_ife_csid_csi2_rx_reg_offset {
 	uint32_t csi2_irq_mask_all;
 	uint32_t csi2_misr_enable_shift_val;
 	uint32_t csi2_vc_mode_shift_val;
+	uint32_t csi2_capture_long_pkt_en_shift;
+	uint32_t csi2_capture_short_pkt_en_shift;
+	uint32_t csi2_capture_cphy_pkt_en_shift;
+	uint32_t csi2_capture_long_pkt_dt_shift;
+	uint32_t csi2_capture_long_pkt_vc_shift;
+	uint32_t csi2_capture_short_pkt_vc_shift;
+	uint32_t csi2_capture_cphy_pkt_dt_shift;
+	uint32_t csi2_capture_cphy_pkt_vc_shift;
 };
 
 struct cam_ife_csid_csi2_tpg_reg_offset {
@@ -226,6 +246,7 @@ struct cam_ife_csid_csi2_tpg_reg_offset {
 	/*configurations */
 	uint32_t tpg_dtn_cfg_offset;
 	uint32_t tpg_cgen_cfg_offset;
+	uint32_t tpg_cpas_ife_reg_offset;
 
 };
 struct cam_ife_csid_common_reg_offset {
@@ -321,6 +342,7 @@ struct cam_ife_csid_csi2_rx_cfg  {
  * @height:           height
  * @test_pattern :    pattern
  * @in_format:        decode format
+ * @usage_type:       whether dual isp is required
  *
  */
 struct cam_ife_csid_tpg_cfg  {
@@ -328,6 +350,7 @@ struct cam_ife_csid_tpg_cfg  {
 	uint32_t                        height;
 	uint32_t                        test_pattern;
 	uint32_t                        in_format;
+	uint32_t                        usage_type;
 };
 
 /**
@@ -358,8 +381,10 @@ struct cam_ife_csid_cid_data {
  * @crop_enable:    crop is enable or disabled, if enabled
  *                  then remaining parameters are valid.
  * @start_pixel:    start pixel
+ * @end_pixel:      end pixel of the crop region
  * @width:          width
  * @start_line:     start line
+ * @end_line:       end line of the crop region
  * @height:         heigth
  * @sync_mode:       Applicable for IPP/RDI path reservation
  *                  Reserving the path for master IPP or slave IPP
@@ -377,8 +402,10 @@ struct cam_ife_csid_path_cfg {
 	uint32_t                        out_format;
 	bool                            crop_enable;
 	uint32_t                        start_pixel;
+	uint32_t                        end_pixel;
 	uint32_t                        width;
 	uint32_t                        start_line;
+	uint32_t                        end_line;
 	uint32_t                        height;
 	enum cam_isp_hw_sync_mode       sync_mode;
 	uint32_t                        master_idx;
@@ -403,6 +430,8 @@ struct cam_ife_csid_path_cfg {
  * @csid_csi2_reset_complete: csi2 reset completion
  * @csid_ipp_reset_complete:  ipp reset completion
  * @csid_rdin_reset_complete: rdi n completion
+ * @csid_debug:               csid debug information to enable the SOT, EOT,
+ *                            SOF, EOF, measure etc in the csid hw
  *
  */
 struct cam_ife_csid_hw {
@@ -422,6 +451,7 @@ struct cam_ife_csid_hw {
 	struct completion                csid_csi2_complete;
 	struct completion                csid_ipp_complete;
 	struct completion    csid_rdin_complete[CAM_IFE_CSID_RDI_MAX];
+	uint64_t                         csid_debug;
 };
 
 int cam_ife_csid_hw_probe_init(struct cam_hw_intf  *csid_hw_intf,
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_lite170.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_lite170.h
index e857f8b..952426d 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_lite170.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_lite170.h
@@ -210,7 +210,7 @@ static struct cam_ife_csid_csi2_rx_reg_offset
 	.csid_csi2_rx_captured_long_pkt_0_addr        = 0x130,
 	.csid_csi2_rx_captured_long_pkt_1_addr        = 0x134,
 	.csid_csi2_rx_captured_long_pkt_ftr_addr      = 0x138,
-	.csid_csi2_rx_captured_cphy_pkt_ftr_addr      = 0x13c,
+	.csid_csi2_rx_captured_cphy_pkt_hdr_addr      = 0x13c,
 	.csid_csi2_rx_lane0_misr_addr                 = 0x150,
 	.csid_csi2_rx_lane1_misr_addr                 = 0x154,
 	.csid_csi2_rx_lane2_misr_addr                 = 0x158,
@@ -224,6 +224,14 @@ static struct cam_ife_csid_csi2_rx_reg_offset
 	.csi2_irq_mask_all                            = 0xFFFFFFF,
 	.csi2_misr_enable_shift_val                   = 6,
 	.csi2_vc_mode_shift_val                       = 2,
+	.csi2_capture_long_pkt_en_shift               = 0,
+	.csi2_capture_short_pkt_en_shift              = 1,
+	.csi2_capture_cphy_pkt_en_shift               = 2,
+	.csi2_capture_long_pkt_dt_shift               = 4,
+	.csi2_capture_long_pkt_vc_shift               = 10,
+	.csi2_capture_short_pkt_vc_shift              = 15,
+	.csi2_capture_cphy_pkt_dt_shift               = 20,
+	.csi2_capture_cphy_pkt_vc_shift               = 26,
 };
 
 
@@ -252,6 +260,7 @@ static struct cam_ife_csid_csi2_tpg_reg_offset
 	/*configurations */
 	.tpg_dtn_cfg_offset                           = 0xc,
 	.tpg_cgen_cfg_offset                          = 0x20,
+	.tpg_cpas_ife_reg_offset                      = 0x28,
 };
 
 
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_soc.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_soc.c
index 020599d..c036bca 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_soc.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_soc.c
@@ -181,3 +181,59 @@ int cam_ife_csid_disable_soc_resources(struct cam_hw_soc_info *soc_info)
 	return rc;
 }
 
+int cam_ife_csid_enable_ife_force_clock_on(struct cam_hw_soc_info  *soc_info,
+	uint32_t cpas_ife_base_offset)
+{
+	int rc = 0;
+	struct cam_csid_soc_private       *soc_private;
+	uint32_t                           cpass_ife_force_clk_offset;
+
+	if (!soc_info) {
+		CAM_ERR(CAM_ISP, "Error Invalid params");
+		return -EINVAL;
+	}
+
+	soc_private = soc_info->soc_private;
+	cpass_ife_force_clk_offset =
+		cpas_ife_base_offset + (0x4 * soc_info->index);
+	rc = cam_cpas_reg_write(soc_private->cpas_handle, CAM_CPAS_REG_CPASTOP,
+		cpass_ife_force_clk_offset, 1, 1);
+
+	if (rc)
+		CAM_ERR(CAM_ISP, "CPASS set IFE:%d Force clock On failed",
+			soc_info->index);
+	else
+		CAM_DBG(CAM_ISP, "CPASS set IFE:%d Force clock On",
+		soc_info->index);
+
+	return rc;
+}
+
+int cam_ife_csid_disable_ife_force_clock_on(struct cam_hw_soc_info *soc_info,
+	uint32_t cpas_ife_base_offset)
+{
+	int rc = 0;
+	struct cam_csid_soc_private       *soc_private;
+	uint32_t                           cpass_ife_force_clk_offset;
+
+	if (!soc_info) {
+		CAM_ERR(CAM_ISP, "Error Invalid params");
+		return -EINVAL;
+	}
+
+	soc_private = soc_info->soc_private;
+	cpass_ife_force_clk_offset =
+		cpas_ife_base_offset + (0x4 * soc_info->index);
+	rc = cam_cpas_reg_write(soc_private->cpas_handle, CAM_CPAS_REG_CPASTOP,
+		cpass_ife_force_clk_offset,  1, 0);
+
+	if (rc)
+		CAM_ERR(CAM_ISP, "CPASS set IFE:%d Force clock Off failed",
+			soc_info->index);
+	else
+		CAM_DBG(CAM_ISP, "CPASS set IFE:%d Force clock off",
+		soc_info->index);
+
+	return rc;
+}
+
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_soc.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_soc.h
index 1a30722..8e963de 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_soc.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_soc.h
@@ -82,4 +82,33 @@ int cam_ife_csid_enable_soc_resources(struct cam_hw_soc_info  *soc_info);
  */
 int cam_ife_csid_disable_soc_resources(struct cam_hw_soc_info *soc_info);
 
+/**
+ * cam_ife_csid_enable_ife_force_clock_on()
+ *
+ * @brief:                 if the csid test generator is used for the dual
+ *                         isp case, enable the ife force clock on through
+ *                         cpas before starting the csid test generator
+ *
+ * @soc_info:              soc info structure pointer
+ * @cpas_ife_base_offset:  cpas ife force clock base reg offset value
+ *
+ */
+int cam_ife_csid_enable_ife_force_clock_on(struct cam_hw_soc_info  *soc_info,
+	uint32_t cpas_ife_base_offset);
+
+/**
+ * cam_ife_csid_disable_ife_force_clock_on()
+ *
+ * @brief:                 disable the IFE force clock on after dual ISP
+ *                         CSID test gen stop
+ *
+ * @soc_info:              soc info structure pointer
+ * @cpas_ife_base_offset:  cpas ife force clock base reg offset value
+ *
+ */
+int cam_ife_csid_disable_ife_force_clock_on(struct cam_hw_soc_info *soc_info,
+	uint32_t cpas_ife_base_offset);
+
+
+
 #endif
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_ife_csid_hw_intf.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_ife_csid_hw_intf.h
index a70707a..37e0ce3 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_ife_csid_hw_intf.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_ife_csid_hw_intf.h
@@ -151,6 +151,7 @@ struct cam_csid_get_time_stamp_args {
  */
 enum cam_ife_csid_cmd_type {
 	CAM_IFE_CSID_CMD_GET_TIME_STAMP,
+	CAM_IFE_CSID_SET_CSID_DEBUG,
 	CAM_IFE_CSID_CMD_MAX,
 };
 
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_isp_hw.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_isp_hw.h
index 60f1c8b..3a0c6a7 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_isp_hw.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_isp_hw.h
@@ -17,6 +17,7 @@
 #include "cam_hw.h"
 #include "cam_soc_util.h"
 #include "cam_irq_controller.h"
+#include <uapi/media/cam_isp.h>
 
 /*
  * struct cam_isp_timestamp:
@@ -175,4 +176,19 @@ struct cam_isp_hw_get_hfr_update {
 	struct cam_isp_port_hfr_config *io_hfr_cfg;
 };
 
+/*
+ * struct cam_isp_hw_dual_isp_update_args:
+ *
+ * @Brief:        update the dual isp striping configuration.
+ *
+ * @ split_id:    split id to inform left or right
+ * @ res:         resource node
+ * @ dual_cfg:    dual isp configuration
+ *
+ */
+struct cam_isp_hw_dual_isp_update_args {
+	enum cam_isp_hw_split_id         split_id;
+	struct cam_isp_resource_node    *res;
+	struct cam_isp_dual_config      *dual_cfg;
+};
 #endif /* _CAM_ISP_HW_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_vfe_hw_intf.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_vfe_hw_intf.h
index a64379c..96263de 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_vfe_hw_intf.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_vfe_hw_intf.h
@@ -50,6 +50,7 @@ enum cam_vfe_hw_cmd_type {
 	CAM_VFE_HW_CMD_GET_REG_UPDATE,
 	CAM_VFE_HW_CMD_GET_HFR_UPDATE,
 	CAM_VFE_HW_CMD_GET_SECURE_MODE,
+	CAM_VFE_HW_CMD_STRIPE_UPDATE,
 	CAM_VFE_HW_CMD_MAX,
 };
 
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.c
index 89db01d..77b830c 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.c
@@ -179,6 +179,13 @@ int cam_vfe_init_hw(void *hw_priv, void *init_hw_args, uint32_t arg_size)
 
 	CAM_DBG(CAM_ISP, "Enable soc done");
 
+	/* Do HW Reset */
+	rc = cam_vfe_reset(hw_priv, NULL, 0);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "Reset Failed rc=%d", rc);
+		goto disable_soc;
+	}
+
 	rc = core_info->vfe_bus->hw_ops.init(core_info->vfe_bus->bus_priv,
 		NULL, 0);
 	if (rc) {
@@ -186,18 +193,7 @@ int cam_vfe_init_hw(void *hw_priv, void *init_hw_args, uint32_t arg_size)
 		goto disable_soc;
 	}
 
-	/* Do HW Reset */
-	rc = cam_vfe_reset(hw_priv, NULL, 0);
-	if (rc) {
-		CAM_ERR(CAM_ISP, "Reset Failed rc=%d", rc);
-		goto deinit_bus;
-	}
-
 	return 0;
-
-deinit_bus:
-	core_info->vfe_bus->hw_ops.deinit(core_info->vfe_bus->bus_priv,
-		NULL, 0);
 disable_soc:
 	cam_vfe_disable_soc_resources(soc_info);
 decrement_open_cnt:
@@ -561,6 +557,7 @@ int cam_vfe_process_cmd(void *hw_priv, uint32_t cmd_type,
 		break;
 	case CAM_VFE_HW_CMD_GET_BUF_UPDATE:
 	case CAM_VFE_HW_CMD_GET_HFR_UPDATE:
+	case CAM_VFE_HW_CMD_STRIPE_UPDATE:
 		rc = core_info->vfe_bus->hw_ops.process_cmd(
 			core_info->vfe_bus->bus_priv, cmd_type, cmd_args,
 			arg_size);
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe170/cam_vfe170.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe170/cam_vfe170.h
index dea906e..e3a6f7b 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe170/cam_vfe170.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe170/cam_vfe170.h
@@ -223,6 +223,9 @@ static struct cam_vfe_bus_ver2_hw_info vfe170_bus_hw_info = {
 		.comp_ovrwr_status            = 0x00002070,
 		.dual_comp_error_status       = 0x00002074,
 		.dual_comp_error_status       = 0x00002078,
+		.addr_sync_cfg                = 0x0000207C,
+		.addr_sync_frame_hdr          = 0x00002080,
+		.addr_sync_no_sync            = 0x00002084,
 	},
 	.bus_client_reg = {
 		/* BUS Client 0 */
@@ -674,26 +677,34 @@ static struct cam_vfe_bus_ver2_hw_info vfe170_bus_hw_info = {
 		/* CAM_VFE_BUS_VER2_COMP_GRP_DUAL_0 */
 		{
 			.comp_mask                    = 0x0000202C,
+			.addr_sync_mask               = 0x00002088,
 		},
 		/* CAM_VFE_BUS_VER2_COMP_GRP_DUAL_1 */
 		{
 			.comp_mask                    = 0x00002030,
+			.addr_sync_mask               = 0x0000208C,
+
 		},
 		/* CAM_VFE_BUS_VER2_COMP_GRP_DUAL_2 */
 		{
 			.comp_mask                    = 0x00002034,
+			.addr_sync_mask               = 0x00002090,
+
 		},
 		/* CAM_VFE_BUS_VER2_COMP_GRP_DUAL_3 */
 		{
 			.comp_mask                    = 0x00002038,
+			.addr_sync_mask               = 0x00002094,
 		},
 		/* CAM_VFE_BUS_VER2_COMP_GRP_DUAL_4 */
 		{
 			.comp_mask                    = 0x0000203C,
+			.addr_sync_mask               = 0x00002098,
 		},
 		/* CAM_VFE_BUS_VER2_COMP_GRP_DUAL_5 */
 		{
 			.comp_mask                    = 0x00002040,
+			.addr_sync_mask               = 0x0000209C,
 		},
 	},
 	.vfe_out_hw_info = {
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c
index 1472e09..005d7e0 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c
@@ -12,6 +12,7 @@
 
 #include <linux/ratelimit.h>
 #include <linux/slab.h>
+#include <uapi/media/cam_isp.h>
 #include "cam_io_util.h"
 #include "cam_debug_util.h"
 #include "cam_cdm_util.h"
@@ -34,14 +35,18 @@ static const char drv_name[] = "vfe_bus";
 
 #define CAM_VFE_BUS_VER2_PAYLOAD_MAX             256
 
-#define CAM_VFE_RDI_BUS_DEFAULT_WIDTH           0xFF01
-#define CAM_VFE_RDI_BUS_DEFAULT_STRIDE          0xFF01
+#define CAM_VFE_RDI_BUS_DEFAULT_WIDTH               0xFF01
+#define CAM_VFE_RDI_BUS_DEFAULT_STRIDE              0xFF01
+#define CAM_VFE_BUS_INTRA_CLIENT_MASK               0x3
+#define CAM_VFE_BUS_ADDR_SYNC_INTRA_CLIENT_SHIFT    8
+#define CAM_VFE_BUS_ADDR_NO_SYNC_DEFAULT_VAL        0xFFFFF
 
 #define ALIGNUP(value, alignment) \
 	((value + alignment - 1) / alignment * alignment)
 
 #define MAX_BUF_UPDATE_REG_NUM   \
-	(sizeof(struct cam_vfe_bus_ver2_reg_offset_bus_client)/4)
+	((sizeof(struct cam_vfe_bus_ver2_reg_offset_bus_client) +  \
+	sizeof(struct cam_vfe_bus_ver2_reg_offset_ubwc_client))/4)
 #define MAX_REG_VAL_PAIR_SIZE    \
 	(MAX_BUF_UPDATE_REG_NUM * 2 * CAM_PACKET_MAX_PLANES)
 
@@ -97,6 +102,7 @@ struct cam_vfe_bus_ver2_common_data {
 	struct mutex                                bus_mutex;
 	uint32_t                                    secure_mode;
 	uint32_t                                    num_sec_out;
+	uint32_t                                    addr_no_sync;
 };
 
 struct cam_vfe_bus_ver2_wm_resource_data {
@@ -133,6 +139,7 @@ struct cam_vfe_bus_ver2_wm_resource_data {
 	uint32_t             framedrop_pattern;
 
 	uint32_t             en_cfg;
+	uint32_t             is_dual;
 };
 
 struct cam_vfe_bus_ver2_comp_grp_data {
@@ -148,6 +155,7 @@ struct cam_vfe_bus_ver2_comp_grp_data {
 	uint32_t                         dual_slave_core;
 	uint32_t                         intra_client_mask;
 	uint32_t                         composite_mask;
+	uint32_t                         addr_sync_mode;
 
 	uint32_t                         acquire_dev_cnt;
 	uint32_t                         irq_trigger_cnt;
@@ -284,7 +292,7 @@ static int cam_vfe_bus_ver2_get_intra_client_mask(
 	case CAM_VFE_BUS_VER2_VFE_CORE_0:
 		switch (dual_slave_core) {
 		case CAM_VFE_BUS_VER2_VFE_CORE_1:
-			*intra_client_mask = 0x1;
+			*intra_client_mask = 0x3;
 			break;
 		case CAM_VFE_BUS_VER2_VFE_CORE_2:
 			*intra_client_mask = 0x2;
@@ -770,10 +778,10 @@ static int cam_vfe_bus_acquire_wm(
 	void                                  *ctx,
 	enum cam_vfe_bus_ver2_vfe_out_type     vfe_out_res_id,
 	enum cam_vfe_bus_plane_type            plane,
-	enum cam_isp_hw_split_id               split_id,
 	uint32_t                               subscribe_irq,
 	struct cam_isp_resource_node         **wm_res,
-	uint32_t                              *client_done_mask)
+	uint32_t                              *client_done_mask,
+	uint32_t                               is_dual)
 {
 	uint32_t wm_idx = 0;
 	struct cam_isp_resource_node              *wm_res_local = NULL;
@@ -802,6 +810,9 @@ static int cam_vfe_bus_acquire_wm(
 
 	rsrc_data->width = out_port_info->width;
 	rsrc_data->height = out_port_info->height;
+	rsrc_data->is_dual = is_dual;
+	/* Set WM offset value to default */
+	rsrc_data->offset  = 0;
 	CAM_DBG(CAM_ISP, "WM %d width %d height %d", rsrc_data->index,
 		rsrc_data->width, rsrc_data->height);
 
@@ -815,6 +826,7 @@ static int cam_vfe_bus_acquire_wm(
 		case CAM_FORMAT_MIPI_RAW_14:
 		case CAM_FORMAT_MIPI_RAW_16:
 		case CAM_FORMAT_MIPI_RAW_20:
+		case CAM_FORMAT_PLAIN128:
 			rsrc_data->width = CAM_VFE_RDI_BUS_DEFAULT_WIDTH;
 			rsrc_data->height = 0;
 			rsrc_data->stride = CAM_VFE_RDI_BUS_DEFAULT_STRIDE;
@@ -859,10 +871,6 @@ static int cam_vfe_bus_acquire_wm(
 			rsrc_data->en_cfg = 0x1;
 			rsrc_data->pack_fmt = 0xA;
 			break;
-		case CAM_FORMAT_PLAIN128:
-			rsrc_data->en_cfg = 0x1;
-			rsrc_data->pack_fmt = 0x0;
-			break;
 		default:
 			CAM_ERR(CAM_ISP, "Unsupported RDI format %d",
 				rsrc_data->format);
@@ -990,6 +998,7 @@ static int cam_vfe_bus_release_wm(void   *bus_priv,
 	rsrc_data->init_cfg_done = false;
 	rsrc_data->hfr_cfg_done = false;
 	rsrc_data->en_cfg = 0;
+	rsrc_data->is_dual = 0;
 
 	wm_res->tasklet_info = NULL;
 	wm_res->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE;
@@ -1280,6 +1289,7 @@ static int cam_vfe_bus_acquire_comp_grp(
 	if (!comp_grp_local) {
 		/* First find a free group */
 		if (is_dual) {
+			CAM_DBG(CAM_ISP, "Acquire dual comp group");
 			if (list_empty(&ver2_bus_priv->free_dual_comp_grp)) {
 				CAM_ERR(CAM_ISP, "No Free Composite Group");
 				return -ENODEV;
@@ -1292,7 +1302,10 @@ static int cam_vfe_bus_acquire_comp_grp(
 				dual_slave_core,
 				comp_grp_local->hw_intf->hw_idx,
 				&rsrc_data->intra_client_mask);
+			if (rc)
+				return rc;
 		} else {
+			CAM_DBG(CAM_ISP, "Acquire comp group");
 			if (list_empty(&ver2_bus_priv->free_comp_grp)) {
 				CAM_ERR(CAM_ISP, "No Free Composite Group");
 				return -ENODEV;
@@ -1312,6 +1325,11 @@ static int cam_vfe_bus_acquire_comp_grp(
 		rsrc_data->unique_id = unique_id;
 		rsrc_data->comp_grp_local_idx = bus_comp_grp_id;
 
+		if (is_master)
+			rsrc_data->addr_sync_mode = 0;
+		else
+			rsrc_data->addr_sync_mode = 1;
+
 		list_add_tail(&comp_grp_local->list,
 			&ver2_bus_priv->used_comp_grp);
 
@@ -1327,6 +1345,8 @@ static int cam_vfe_bus_acquire_comp_grp(
 		}
 	}
 
+	CAM_DBG(CAM_ISP, "Comp Grp type %u", rsrc_data->comp_grp_type);
+
 	rsrc_data->ctx = ctx;
 	rsrc_data->acquire_dev_cnt++;
 	*comp_grp = comp_grp_local;
@@ -1359,6 +1379,7 @@ static int cam_vfe_bus_release_comp_grp(
 	}
 
 	in_rsrc_data = in_comp_grp->res_priv;
+	CAM_DBG(CAM_ISP, "Comp Grp type %u", in_rsrc_data->comp_grp_type);
 
 	list_for_each_entry(comp_grp, &ver2_bus_priv->used_comp_grp, list) {
 		if (comp_grp == in_comp_grp) {
@@ -1381,6 +1402,7 @@ static int cam_vfe_bus_release_comp_grp(
 		in_rsrc_data->comp_grp_local_idx = CAM_VFE_BUS_COMP_GROUP_NONE;
 		in_rsrc_data->composite_mask = 0;
 		in_rsrc_data->dual_slave_core = CAM_VFE_BUS_VER2_VFE_CORE_MAX;
+		in_rsrc_data->addr_sync_mode = 0;
 
 		comp_grp->tasklet_info = NULL;
 		comp_grp->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE;
@@ -1405,55 +1427,103 @@ static int cam_vfe_bus_release_comp_grp(
 static int cam_vfe_bus_start_comp_grp(struct cam_isp_resource_node *comp_grp)
 {
 	int rc = 0;
+	uint32_t addr_sync_cfg;
 	struct cam_vfe_bus_ver2_comp_grp_data      *rsrc_data =
 		comp_grp->res_priv;
 	struct cam_vfe_bus_ver2_common_data        *common_data =
 		rsrc_data->common_data;
 	uint32_t bus_irq_reg_mask[CAM_VFE_BUS_IRQ_MAX] = {0};
 
+	CAM_DBG(CAM_ISP, "comp group id:%d streaming state:%d",
+		rsrc_data->comp_grp_type, comp_grp->res_state);
+
 	cam_io_w_mb(rsrc_data->composite_mask, common_data->mem_base +
 		rsrc_data->hw_regs->comp_mask);
+	if (comp_grp->res_state == CAM_ISP_RESOURCE_STATE_STREAMING)
+		return 0;
 
 	CAM_DBG(CAM_ISP, "composite_mask is 0x%x", rsrc_data->composite_mask);
 	CAM_DBG(CAM_ISP, "composite_mask addr 0x%x",
 		rsrc_data->hw_regs->comp_mask);
 
 	if (rsrc_data->comp_grp_type >= CAM_VFE_BUS_VER2_COMP_GRP_DUAL_0 &&
-		rsrc_data->comp_grp_type <= CAM_VFE_BUS_VER2_COMP_GRP_DUAL_5 &&
-		rsrc_data->is_master) {
+		rsrc_data->comp_grp_type <= CAM_VFE_BUS_VER2_COMP_GRP_DUAL_5) {
 		int dual_comp_grp = (rsrc_data->comp_grp_type -
 			CAM_VFE_BUS_VER2_COMP_GRP_DUAL_0);
-		int intra_client_en = cam_io_r_mb(common_data->mem_base +
-			common_data->common_reg->dual_master_comp_cfg);
 
-		/* 2 Bits per comp_grp. Hence left shift by comp_grp * 2 */
-		intra_client_en |=
-			(rsrc_data->intra_client_mask << dual_comp_grp * 2);
+		if (rsrc_data->is_master) {
+			int intra_client_en = cam_io_r_mb(
+				common_data->mem_base +
+				common_data->common_reg->dual_master_comp_cfg);
 
-		cam_io_w_mb(intra_client_en, common_data->mem_base +
-			common_data->common_reg->dual_master_comp_cfg);
+			/*
+			 * 2 Bits per comp_grp. Hence left shift by
+			 * comp_grp * 2
+			 */
+			intra_client_en |=
+				(rsrc_data->intra_client_mask <<
+					(dual_comp_grp * 2));
 
-		bus_irq_reg_mask[CAM_VFE_BUS_IRQ_REG2] = (1 << dual_comp_grp);
+			cam_io_w_mb(intra_client_en, common_data->mem_base +
+				common_data->common_reg->dual_master_comp_cfg);
+
+			bus_irq_reg_mask[CAM_VFE_BUS_IRQ_REG2] =
+				(1 << dual_comp_grp);
+		}
+
+		CAM_DBG(CAM_ISP, "addr_sync_mask addr 0x%x",
+			rsrc_data->hw_regs->addr_sync_mask);
+		cam_io_w_mb(rsrc_data->composite_mask, common_data->mem_base +
+			rsrc_data->hw_regs->addr_sync_mask);
+
+		addr_sync_cfg = cam_io_r_mb(common_data->mem_base +
+			common_data->common_reg->addr_sync_cfg);
+		addr_sync_cfg |= (rsrc_data->addr_sync_mode << dual_comp_grp);
+		/*
+		 * 2 bits per dual_comp_grp. The intra-client field for
+		 * dual_comp_grp starts at bit 8. Hence left shift by
+		 * (dual_comp_grp * 2) and add 8
+		 */
+		addr_sync_cfg |=
+			(rsrc_data->intra_client_mask <<
+				((dual_comp_grp * 2) +
+				CAM_VFE_BUS_ADDR_SYNC_INTRA_CLIENT_SHIFT));
+		cam_io_w_mb(addr_sync_cfg, common_data->mem_base +
+			common_data->common_reg->addr_sync_cfg);
+
+		common_data->addr_no_sync &= ~(rsrc_data->composite_mask);
+		cam_io_w_mb(common_data->addr_no_sync, common_data->mem_base +
+			common_data->common_reg->addr_sync_no_sync);
+		CAM_DBG(CAM_ISP, "addr_sync_cfg: 0x%x addr_no_sync_cfg: 0x%x",
+			addr_sync_cfg, common_data->addr_no_sync);
 	} else {
 		/* IRQ bits for COMP GRP start at 5. So add 5 to the shift */
 		bus_irq_reg_mask[CAM_VFE_BUS_IRQ_REG0] =
 			(1 << (rsrc_data->comp_grp_type + 5));
 	}
 
-	/* Subscribe IRQ */
+	/*
+	 * For Dual composite subscribe IRQ only for master
+	 * For regular composite, subscribe IRQ always
+	 */
 	CAM_DBG(CAM_ISP, "Subscribe COMP_GRP%d IRQ", rsrc_data->comp_grp_type);
-	comp_grp->irq_handle = cam_irq_controller_subscribe_irq(
-		common_data->bus_irq_controller, CAM_IRQ_PRIORITY_1,
-		bus_irq_reg_mask, comp_grp,
-		comp_grp->top_half_handler,
-		cam_ife_mgr_do_tasklet_buf_done,
-		comp_grp->tasklet_info, cam_tasklet_enqueue_cmd);
-	if (comp_grp->irq_handle < 0) {
-		CAM_ERR(CAM_ISP, "Subscribe IRQ failed for comp_grp %d",
-			rsrc_data->comp_grp_type);
-		return -EFAULT;
+	if (((rsrc_data->comp_grp_type >= CAM_VFE_BUS_VER2_COMP_GRP_DUAL_0 &&
+		rsrc_data->comp_grp_type <= CAM_VFE_BUS_VER2_COMP_GRP_DUAL_5) &&
+		(rsrc_data->is_master)) ||
+		(rsrc_data->comp_grp_type >= CAM_VFE_BUS_VER2_COMP_GRP_0 &&
+		rsrc_data->comp_grp_type <= CAM_VFE_BUS_VER2_COMP_GRP_5)) {
+		comp_grp->irq_handle = cam_irq_controller_subscribe_irq(
+			common_data->bus_irq_controller, CAM_IRQ_PRIORITY_1,
+			bus_irq_reg_mask, comp_grp,
+			comp_grp->top_half_handler,
+			cam_ife_mgr_do_tasklet_buf_done,
+			comp_grp->tasklet_info, cam_tasklet_enqueue_cmd);
+		if (comp_grp->irq_handle < 0) {
+			CAM_ERR(CAM_ISP, "Subscribe IRQ failed for comp_grp %d",
+				rsrc_data->comp_grp_type);
+			return -EFAULT;
+		}
 	}
-
 	comp_grp->res_state = CAM_ISP_RESOURCE_STATE_STREAMING;
 
 	return rc;
@@ -1462,32 +1532,65 @@ static int cam_vfe_bus_start_comp_grp(struct cam_isp_resource_node *comp_grp)
 static int cam_vfe_bus_stop_comp_grp(struct cam_isp_resource_node *comp_grp)
 {
 	int rc = 0;
+	uint32_t addr_sync_cfg;
 	struct cam_vfe_bus_ver2_comp_grp_data      *rsrc_data =
 		comp_grp->res_priv;
 	struct cam_vfe_bus_ver2_common_data        *common_data =
 		rsrc_data->common_data;
 
 	/* Unsubscribe IRQ */
-	rc = cam_irq_controller_unsubscribe_irq(
-		common_data->bus_irq_controller,
-		comp_grp->irq_handle);
+	if (((rsrc_data->comp_grp_type >= CAM_VFE_BUS_VER2_COMP_GRP_DUAL_0 &&
+		rsrc_data->comp_grp_type <= CAM_VFE_BUS_VER2_COMP_GRP_DUAL_5) &&
+		(rsrc_data->is_master)) ||
+		(rsrc_data->comp_grp_type >= CAM_VFE_BUS_VER2_COMP_GRP_0 &&
+		rsrc_data->comp_grp_type <= CAM_VFE_BUS_VER2_COMP_GRP_5)) {
+		rc = cam_irq_controller_unsubscribe_irq(
+			common_data->bus_irq_controller,
+			comp_grp->irq_handle);
+	}
 
 	cam_io_w_mb(rsrc_data->composite_mask, common_data->mem_base +
 		rsrc_data->hw_regs->comp_mask);
 	if (rsrc_data->comp_grp_type >= CAM_VFE_BUS_VER2_COMP_GRP_DUAL_0 &&
-		rsrc_data->comp_grp_type <= CAM_VFE_BUS_VER2_COMP_GRP_DUAL_5 &&
-		rsrc_data->is_master) {
+		rsrc_data->comp_grp_type <= CAM_VFE_BUS_VER2_COMP_GRP_DUAL_5) {
+
 		int dual_comp_grp = (rsrc_data->comp_grp_type -
 			CAM_VFE_BUS_VER2_COMP_GRP_DUAL_0);
-		int intra_client_en = cam_io_r_mb(common_data->mem_base +
-			common_data->common_reg->dual_master_comp_cfg);
 
-		/* 2 Bits per comp_grp. Hence left shift by comp_grp * 2 */
-		intra_client_en &=
-			~(rsrc_data->intra_client_mask << dual_comp_grp * 2);
+		if (rsrc_data->is_master) {
+			int intra_client_en = cam_io_r_mb(
+				common_data->mem_base +
+				common_data->common_reg->dual_master_comp_cfg);
 
-		cam_io_w_mb(intra_client_en, common_data->mem_base +
-			common_data->common_reg->dual_master_comp_cfg);
+			/*
+			 * 2 Bits per comp_grp. Hence left shift by
+			 * comp_grp * 2
+			 */
+			intra_client_en &=
+				~(rsrc_data->intra_client_mask <<
+					dual_comp_grp * 2);
+
+			cam_io_w_mb(intra_client_en, common_data->mem_base +
+				common_data->common_reg->dual_master_comp_cfg);
+		}
+
+		addr_sync_cfg = cam_io_r_mb(common_data->mem_base +
+			common_data->common_reg->addr_sync_cfg);
+		addr_sync_cfg &= ~(1 << dual_comp_grp);
+		addr_sync_cfg &= ~(CAM_VFE_BUS_INTRA_CLIENT_MASK <<
+			((dual_comp_grp * 2) +
+			CAM_VFE_BUS_ADDR_SYNC_INTRA_CLIENT_SHIFT));
+		cam_io_w_mb(addr_sync_cfg, common_data->mem_base +
+			common_data->common_reg->addr_sync_cfg);
+
+		cam_io_w_mb(0, common_data->mem_base +
+			rsrc_data->hw_regs->addr_sync_mask);
+		common_data->addr_no_sync |= rsrc_data->composite_mask;
+		cam_io_w_mb(common_data->addr_no_sync, common_data->mem_base +
+			common_data->common_reg->addr_sync_no_sync);
+		CAM_DBG(CAM_ISP, "addr_sync_cfg: 0x%x addr_no_sync_cfg: 0x%x",
+			addr_sync_cfg, common_data->addr_no_sync);
+
 	}
 
 	comp_grp->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
@@ -1514,6 +1617,7 @@ static int cam_vfe_bus_handle_comp_done_top_half(uint32_t evt_id,
 
 	CAM_DBG(CAM_ISP, "IRQ status_0 = %x", th_payload->evt_status_arr[0]);
 	CAM_DBG(CAM_ISP, "IRQ status_1 = %x", th_payload->evt_status_arr[1]);
+	CAM_DBG(CAM_ISP, "IRQ status_2 = %x", th_payload->evt_status_arr[2]);
 
 	rc  = cam_vfe_bus_get_evt_payload(rsrc_data->common_data, &evt_payload);
 	if (rc) {
@@ -1636,7 +1740,7 @@ static int cam_vfe_bus_handle_comp_done_bottom_half(
 			if (rsrc_data->irq_trigger_cnt ==
 				rsrc_data->acquire_dev_cnt) {
 				cam_ife_irq_regs[CAM_IFE_IRQ_BUS_REG_STATUS2] &=
-					~BIT(comp_grp_id + 5);
+					~BIT(comp_grp_id);
 				rsrc_data->irq_trigger_cnt = 0;
 			}
 			rc = CAM_VFE_IRQ_STATUS_SUCCESS;
@@ -1818,10 +1922,12 @@ static int cam_vfe_bus_acquire_vfe_out(void *bus_priv, void *acquire_args,
 	rsrc_data->cdm_util_ops = out_acquire_args->cdm_ops;
 
 	/* Reserve Composite Group */
-	if (num_wm > 1 || (out_acquire_args->out_port_info->comp_grp_id >
-		CAM_ISP_RES_COMP_GROUP_NONE &&
-		out_acquire_args->out_port_info->comp_grp_id <
-		CAM_ISP_RES_COMP_GROUP_ID_MAX)) {
+	if (num_wm > 1 || (out_acquire_args->is_dual) ||
+		(out_acquire_args->out_port_info->comp_grp_id >
+		CAM_ISP_RES_COMP_GROUP_NONE &&
+		out_acquire_args->out_port_info->comp_grp_id <
+		CAM_ISP_RES_COMP_GROUP_ID_MAX)) {
+
 		rc = cam_vfe_bus_acquire_comp_grp(ver2_bus_priv,
 			out_acquire_args->out_port_info,
 			acq_args->tasklet,
@@ -1852,10 +1958,10 @@ static int cam_vfe_bus_acquire_vfe_out(void *bus_priv, void *acquire_args,
 			out_acquire_args->ctx,
 			vfe_out_res_id,
 			i,
-			out_acquire_args->split_id,
 			subscribe_irq,
 			&rsrc_data->wm_res[i],
-			&client_done_mask);
+			&client_done_mask,
+			out_acquire_args->is_dual);
 		if (rc) {
 			CAM_ERR(CAM_ISP,
 				"VFE%d WM acquire failed for Out %d rc=%d",
@@ -1980,28 +2086,6 @@ static int cam_vfe_bus_start_vfe_out(
 	if (rsrc_data->comp_grp)
 		rc = cam_vfe_bus_start_comp_grp(rsrc_data->comp_grp);
 
-	/* BUS_WR_INPUT_IF_ADDR_SYNC_CFG */
-	cam_io_w_mb(0x0, rsrc_data->common_data->mem_base + 0x0000207C);
-	/*  BUS_WR_INPUT_IF_ADDR_SYNC_FRAME_HEADER */
-	cam_io_w_mb(0x0, rsrc_data->common_data->mem_base + 0x00002080);
-	/* BUS_WR_INPUT_IF_ADDR_SYNC_NO_SYNC */
-	cam_io_w_mb(0xFFFFF, rsrc_data->common_data->mem_base + 0x00002084);
-	/*  BUS_WR_INPUT_IF_ADDR_SYNC_0 */
-	cam_io_w_mb(0x0, rsrc_data->common_data->mem_base + 0x00002088);
-	cam_io_w_mb(0x0, rsrc_data->common_data->mem_base + 0x0000208c);
-	cam_io_w_mb(0x0, rsrc_data->common_data->mem_base + 0x00002090);
-	cam_io_w_mb(0x0, rsrc_data->common_data->mem_base + 0x00002094);
-	cam_io_w_mb(0x0, rsrc_data->common_data->mem_base + 0x00002098);
-	cam_io_w_mb(0x0, rsrc_data->common_data->mem_base + 0x0000209c);
-	cam_io_w_mb(0x0, rsrc_data->common_data->mem_base + 0x000020a0);
-	cam_io_w_mb(0x0, rsrc_data->common_data->mem_base + 0x000020a4);
-
-	/* no clock gating at bus input */
-	cam_io_w_mb(0xFFFFF, rsrc_data->common_data->mem_base + 0x0000200C);
-
-	/* BUS_WR_TEST_BUS_CTRL */
-	cam_io_w_mb(0x0, rsrc_data->common_data->mem_base + 0x0000211C);
-
 	vfe_out->res_state = CAM_ISP_RESOURCE_STATE_STREAMING;
 	return rc;
 }
@@ -2030,7 +2114,6 @@ static int cam_vfe_bus_stop_vfe_out(
 	for (i = 0; i < rsrc_data->num_wm; i++)
 		rc = cam_vfe_bus_stop_wm(rsrc_data->wm_res[i]);
 
-
 	vfe_out->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
 	return rc;
 }
@@ -2149,11 +2232,6 @@ static int cam_vfe_bus_update_buf(void *priv, void *cmd_args,
 	uint32_t  i, j, size = 0;
 	uint32_t  frame_inc = 0;
 
-	/*
-	 * Need the entire buf io config so we can get the stride info
-	 * for the wm.
-	 */
-
 	bus_priv = (struct cam_vfe_bus_ver2_priv  *) priv;
 	update_buf =  (struct cam_isp_hw_get_buf_update *) cmd_args;
 
@@ -2185,6 +2263,12 @@ static int cam_vfe_bus_update_buf(void *priv, void *cmd_args,
 
 		wm_data = vfe_out_data->wm_res[i]->res_priv;
 
+		/* update width register */
+		CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
+			wm_data->hw_regs->buffer_width_cfg,
+			wm_data->width);
+		CAM_DBG(CAM_ISP, "image width 0x%x", wm_data->width);
+
 		/* For initial configuration program all bus registers */
 		if ((wm_data->stride != io_cfg->planes[i].plane_stride ||
 			!wm_data->init_cfg_done) && (wm_data->index >= 3)) {
@@ -2255,7 +2339,12 @@ static int cam_vfe_bus_update_buf(void *priv, void *cmd_args,
 			CAM_DBG(CAM_ISP, "packer cfg 0x%x",
 				wm_data->packer_cfg);
 
-			if (wm_data->tile_cfg != io_cfg->planes[i].tile_config
+			if (wm_data->is_dual) {
+				CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
+					wm_data->hw_regs->ubwc_regs->tile_cfg,
+					wm_data->tile_cfg);
+			} else if ((wm_data->tile_cfg !=
+				io_cfg->planes[i].tile_config)
 				|| !wm_data->init_cfg_done) {
 				CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
 					wm_data->hw_regs->ubwc_regs->tile_cfg,
@@ -2265,7 +2354,22 @@ static int cam_vfe_bus_update_buf(void *priv, void *cmd_args,
 			}
 			CAM_DBG(CAM_ISP, "tile cfg 0x%x", wm_data->tile_cfg);
 
-			if (wm_data->h_init != io_cfg->planes[i].h_init ||
+			if (wm_data->is_dual) {
+				if ((wm_data->h_init != wm_data->offset) ||
+					!wm_data->init_cfg_done) {
+				/*
+				 * For dual IFE, the h_init value must be
+				 * taken from offset; the striping config
+				 * updates the offset value
+				 */
+					CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair,
+						j,
+						wm_data->hw_regs->ubwc_regs->
+						h_init,
+						wm_data->offset);
+					wm_data->h_init = wm_data->offset;
+				}
+			} else if (wm_data->h_init !=
+				io_cfg->planes[i].h_init ||
 				!wm_data->init_cfg_done) {
 				CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
 					wm_data->hw_regs->ubwc_regs->h_init,
@@ -2337,8 +2441,9 @@ static int cam_vfe_bus_update_buf(void *priv, void *cmd_args,
 				io_cfg->planes[i].meta_size));
 		else
 			CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
-				wm_data->hw_regs->image_addr,
-				update_buf->image_buf[i]);
+			wm_data->hw_regs->image_addr,
+			update_buf->image_buf[i] +
+			wm_data->offset);
 
 		CAM_DBG(CAM_ISP, "image address 0x%x", reg_val_pair[j-1]);
 
@@ -2478,6 +2583,50 @@ static int cam_vfe_bus_update_hfr(void *priv, void *cmd_args,
 	return 0;
 }
 
+static int cam_vfe_bus_update_stripe_cfg(void *priv, void *cmd_args,
+	uint32_t arg_size)
+{
+	struct cam_vfe_bus_ver2_priv                *bus_priv;
+	struct cam_isp_hw_dual_isp_update_args      *stripe_args;
+	struct cam_vfe_bus_ver2_vfe_out_data        *vfe_out_data = NULL;
+	struct cam_vfe_bus_ver2_wm_resource_data    *wm_data = NULL;
+	struct cam_isp_dual_stripe_config           *stripe_config;
+	uint32_t outport_id, ports_plane_idx, i;
+
+	bus_priv = (struct cam_vfe_bus_ver2_priv  *) priv;
+	stripe_args = (struct cam_isp_hw_dual_isp_update_args *)cmd_args;
+
+	vfe_out_data = (struct cam_vfe_bus_ver2_vfe_out_data *)
+		stripe_args->res->res_priv;
+
+	if (!vfe_out_data) {
+		CAM_ERR(CAM_ISP, "Failed! Invalid data");
+		return -EINVAL;
+	}
+
+	outport_id = stripe_args->res->res_id & 0xFF;
+	if (stripe_args->res->res_id < CAM_ISP_IFE_OUT_RES_BASE ||
+		stripe_args->res->res_id >= CAM_ISP_IFE_OUT_RES_MAX)
+		return 0;
+
+	ports_plane_idx = (stripe_args->split_id *
+		(stripe_args->dual_cfg->num_ports * CAM_PACKET_MAX_PLANES)) +
+		(outport_id * CAM_PACKET_MAX_PLANES);
+	for (i = 0; i < vfe_out_data->num_wm; i++) {
+		wm_data = vfe_out_data->wm_res[i]->res_priv;
+		stripe_config = (struct cam_isp_dual_stripe_config  *)
+			&stripe_args->dual_cfg->stripes[ports_plane_idx + i];
+		wm_data->width = stripe_config->width;
+		wm_data->offset = stripe_config->offset;
+		wm_data->tile_cfg = stripe_config->tileconfig;
+		CAM_DBG(CAM_ISP, "id:%x wm:%d width:0x%x offset:%x tilecfg:%x",
+			stripe_args->res->res_id, i, wm_data->width,
+			wm_data->offset, wm_data->tile_cfg);
+	}
+
+	return 0;
+}
+
 static int cam_vfe_bus_start_hw(void *hw_priv,
 	void *start_hw_args, uint32_t arg_size)
 {
@@ -2518,6 +2667,21 @@ static int cam_vfe_bus_init_hw(void *hw_priv,
 		return -EFAULT;
 	}
 
+	/* BUS_WR_INPUT_IF_ADDR_SYNC_FRAME_HEADER */
+	cam_io_w_mb(0x0, bus_priv->common_data.mem_base +
+		bus_priv->common_data.common_reg->addr_sync_frame_hdr);
+
+	/* no clock gating at bus input */
+	cam_io_w_mb(0xFFFFF, bus_priv->common_data.mem_base + 0x0000200C);
+
+	/* BUS_WR_TEST_BUS_CTRL */
+	cam_io_w_mb(0x0, bus_priv->common_data.mem_base + 0x0000211C);
+
+	/* program the addr_no_sync register with its default value */
+	cam_io_w_mb(CAM_VFE_BUS_ADDR_NO_SYNC_DEFAULT_VAL,
+		bus_priv->common_data.mem_base +
+		bus_priv->common_data.common_reg->addr_sync_no_sync);
+
 	return 0;
 }
 
@@ -2568,6 +2732,9 @@ static int cam_vfe_bus_process_cmd(
 	case CAM_VFE_HW_CMD_GET_SECURE_MODE:
 		rc = cam_vfe_bus_get_secure_mode(priv, cmd_args, arg_size);
 		break;
+	case CAM_VFE_HW_CMD_STRIPE_UPDATE:
+		rc = cam_vfe_bus_update_stripe_cfg(priv, cmd_args, arg_size);
+		break;
 	default:
 		CAM_ERR_RATE_LIMIT(CAM_ISP, "Invalid camif process command:%d",
 			cmd_type);
@@ -2624,6 +2791,9 @@ int cam_vfe_bus_ver2_init(
 	bus_priv->common_data.hw_intf            = hw_intf;
 	bus_priv->common_data.vfe_irq_controller = vfe_irq_controller;
 	bus_priv->common_data.common_reg         = &ver2_hw_info->common_reg;
+	bus_priv->common_data.addr_no_sync       =
+		CAM_VFE_BUS_ADDR_NO_SYNC_DEFAULT_VAL;
+
 	mutex_init(&bus_priv->common_data.bus_mutex);
 
 	rc = cam_irq_controller_init(drv_name, bus_priv->common_data.mem_base,
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.h
index ba98077..c90d4ce 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.h
@@ -81,8 +81,8 @@ struct cam_vfe_bus_ver2_reg_offset_common {
 	uint32_t dual_comp_error_status;
 	uint32_t dual_comp_ovrwr_status;
 	uint32_t addr_sync_cfg;
-	uint32_t addr_syn_frame_hdr;
-	uint32_t addr_syn_no_sync;
+	uint32_t addr_sync_frame_hdr;
+	uint32_t addr_sync_no_sync;
 };
 
 /*
@@ -130,9 +130,12 @@ struct cam_vfe_bus_ver2_reg_offset_bus_client {
  * struct cam_vfe_bus_ver2_reg_offset_comp_grp:
  *
  * @Brief:        Register offsets for Composite Group registers
+ * comp_mask:     Comp group register address
+ * addr_sync_mask:Address sync group register address
  */
 struct cam_vfe_bus_ver2_reg_offset_comp_grp {
 	uint32_t                            comp_mask;
+	uint32_t                            addr_sync_mask;
 };
 
 /*
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.c
index f255691..e81a9f2 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.c
@@ -158,8 +158,14 @@ static int cam_vfe_camif_resource_start(
 	/*config vfe core*/
 	val = (rsrc_data->pix_pattern <<
 			rsrc_data->reg_data->pixel_pattern_shift);
+	if (rsrc_data->sync_mode == CAM_ISP_HW_SYNC_SLAVE)
+		val |= (1 << rsrc_data->reg_data->extern_reg_update_shift);
+
 	cam_io_w_mb(val, rsrc_data->mem_base + rsrc_data->common_reg->core_cfg);
 
+	CAM_DBG(CAM_ISP, "hw id:%d core_cfg val:%d", camif_res->hw_intf->hw_idx,
+		val);
+
 	cam_io_w_mb(0x00400040, rsrc_data->mem_base +
 		rsrc_data->camif_reg->camif_config);
 	cam_io_w_mb(0x1, rsrc_data->mem_base +
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_rdi.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_rdi.c
index 461e3c3..797873c 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_rdi.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_rdi.c
@@ -67,7 +67,7 @@ static int cam_vfe_rdi_get_reg_update(
 	rsrc_data = rdi_res->res_priv;
 	reg_val_pair[0] = rsrc_data->rdi_reg->reg_update_cmd;
 	reg_val_pair[1] = rsrc_data->reg_data->reg_update_cmd_data;
-	CAM_DBG(CAM_ISP, "Error - RDI%d reg_update_cmd %x",
+	CAM_DBG(CAM_ISP, "RDI%d reg_update_cmd %x",
 		rdi_res->res_id - CAM_ISP_HW_VFE_IN_RDI0, reg_val_pair[1]);
 
 	cdm_util_ops->cdm_write_regrandom(cdm_args->cmd_buf_addr,
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
index 681504d..8565ad1 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
@@ -180,6 +180,15 @@ static int __cam_req_mgr_traverse(struct cam_req_mgr_traverse *traverse_data)
 		tbl->pd, curr_idx, tbl->slot[curr_idx].state,
 		tbl->skip_traverse, traverse_data->in_q->slot[curr_idx].status);
 
+	if (tbl->inject_delay > 0) {
+		CAM_DBG(CAM_CRM, "Injecting Delay of one frame");
+		apply_data[tbl->pd].req_id = -1;
+		tbl->inject_delay--;
+		/* This pd table is not ready to proceed with asked idx */
+		SET_FAILURE_BIT(traverse_data->result, tbl->pd);
+		return -EAGAIN;
+	}
+
 	/* Check if req is ready or in skip mode or pd tbl is in skip mode */
 	if (tbl->slot[curr_idx].state == CRM_REQ_STATE_READY ||
 		traverse_data->in_q->slot[curr_idx].skip_idx == 1 ||
@@ -1210,6 +1219,10 @@ int cam_req_mgr_process_add_req(void *priv, void *data)
 		mutex_unlock(&link->req.lock);
 		goto end;
 	}
+
+	if (add_req->skip_before_applying > tbl->inject_delay)
+		tbl->inject_delay = add_req->skip_before_applying;
+
 	slot = &tbl->slot[idx];
 	if (slot->state != CRM_REQ_STATE_PENDING &&
 		slot->state != CRM_REQ_STATE_EMPTY) {
@@ -1452,6 +1465,7 @@ static int cam_req_mgr_cb_add_req(struct cam_req_mgr_add_request *add_req)
 	dev_req->req_id = add_req->req_id;
 	dev_req->link_hdl = add_req->link_hdl;
 	dev_req->dev_hdl = add_req->dev_hdl;
+	dev_req->skip_before_applying = add_req->skip_before_applying;
 	task->process_cb = &cam_req_mgr_process_add_req;
 	rc = cam_req_mgr_workq_enqueue_task(task, link, CRM_TASK_PRIORITY_0);
 	CAM_DBG(CAM_CRM, "X: dev %x dev req %lld",
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.h b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.h
index e45d634..3687a5a 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.h
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.h
@@ -173,6 +173,7 @@ struct cam_req_mgr_tbl_slot {
  * @pd_delta      : differnce between this table's pipeline delay and next
  * @num_slots     : number of request slots present in the table
  * @slot          : array of slots tracking requests availability at devices
+ * @inject_delay  : insert extra bubbling for flash type of use cases
  */
 struct cam_req_mgr_req_tbl {
 	int32_t                     id;
@@ -184,6 +185,7 @@ struct cam_req_mgr_req_tbl {
 	int32_t                     pd_delta;
 	int32_t                     num_slots;
 	struct cam_req_mgr_tbl_slot slot[MAX_REQ_SLOTS];
+	uint32_t                    inject_delay;
 };
 
 /**
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core_defs.h b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core_defs.h
index f61c41e..475b640 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core_defs.h
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core_defs.h
@@ -15,13 +15,9 @@
 #define CRM_TRACE_ENABLE 0
 #define CRM_DEBUG_MUTEX 0
 
-#define SET_SUCCESS_BIT(ret, pd)	{\
-	(ret) |= (1 << (pd));	\
-	}
+#define SET_SUCCESS_BIT(ret, pd)  (ret |= (1 << (pd)))
 
-#define SET_FAILURE_BIT(ret, pd)	{\
-	(ret) &= (0 << (pd));	\
-	}
+#define SET_FAILURE_BIT(ret, pd)  (ret &= (~(1 << (pd))))
 
 #define CRM_GET_REQ_ID(in_q, idx) in_q->slot[idx].req_id
 
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_interface.h b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_interface.h
index 6195b59..ce8dfa7 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_interface.h
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_interface.h
@@ -216,15 +216,18 @@ struct cam_req_mgr_error_notify {
 
 /**
  * struct cam_req_mgr_add_request
- * @link_hdl : link identifier
- * @dev_hdl  : device handle which has sent this req id
- * @req_id   : req id which device is ready to process
- *
+ * @link_hdl             : link identifier
+ * @dev_hdl              : device handle which has sent this req id
+ * @req_id               : req id which device is ready to process
+ * @skip_before_applying : before applying req mgr introduce bubble
+ *                         by not sending request to device/s.
+ *                         ex: IFE and Flash
  */
 struct cam_req_mgr_add_request {
 	int32_t  link_hdl;
 	int32_t  dev_hdl;
 	uint64_t req_id;
+	uint32_t skip_before_applying;
 };
 
 
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_core.c
index 8ffa0ff..c3475b6 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_core.c
@@ -93,6 +93,8 @@ int32_t cam_actuator_slaveInfo_pkt_parser(struct cam_actuator_ctrl_t *a_ctrl,
 
 	i2c_info = (struct cam_cmd_i2c_info *)cmd_buf;
 	if (a_ctrl->io_master_info.master_type == CCI_MASTER) {
+		a_ctrl->io_master_info.cci_client->cci_i2c_master =
+			a_ctrl->cci_i2c_master;
 		a_ctrl->io_master_info.cci_client->i2c_freq_mode =
 			i2c_info->i2c_freq_mode;
 		a_ctrl->io_master_info.cci_client->sid =
@@ -370,6 +372,7 @@ int32_t cam_actuator_i2c_pkt_parse(struct cam_actuator_ctrl_t *a_ctrl,
 		add_req.link_hdl = a_ctrl->bridge_intf.link_hdl;
 		add_req.req_id = csl_packet->header.request_id;
 		add_req.dev_hdl = a_ctrl->bridge_intf.device_hdl;
+		add_req.skip_before_applying = 0;
 		if (a_ctrl->bridge_intf.crm_cb &&
 			a_ctrl->bridge_intf.crm_cb->add_req)
 			a_ctrl->bridge_intf.crm_cb->add_req(&add_req);
@@ -554,7 +557,7 @@ int32_t cam_actuator_driver_cmd(struct cam_actuator_ctrl_t *a_ctrl,
 	case CAM_QUERY_CAP: {
 		struct cam_actuator_query_cap actuator_cap = {0};
 
-		actuator_cap.slot_info = a_ctrl->id;
+		actuator_cap.slot_info = a_ctrl->soc_info.index;
 		if (copy_to_user((void __user *) cmd->handle, &actuator_cap,
 			sizeof(struct cam_actuator_query_cap))) {
 			CAM_ERR(CAM_ACTUATOR, "Failed Copy to User");
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.c
index b58f5d8..45dbba1 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.c
@@ -286,8 +286,7 @@ static int32_t cam_actuator_driver_platform_probe(
 	}
 
 	/* Fill platform device id*/
-	a_ctrl->id = a_ctrl->soc_info.index;
-	pdev->id = a_ctrl->id;
+	pdev->id = a_ctrl->soc_info.index;
 
 	rc = cam_actuator_init_subdev(a_ctrl);
 	if (rc)
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.h
index fdf881f3..ad42a3d 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.h
@@ -75,7 +75,6 @@ struct intf_params {
  * @cci_i2c_master: I2C structure
  * @io_master_info: Information about the communication master
  * @actuator_mutex: Actuator mutex
- * @id: Cell Index
  * @act_apply_state: Actuator settings aRegulator config
  * @gconf: GPIO config
  * @pinctrl_info: Pinctrl information
@@ -91,7 +90,6 @@ struct cam_actuator_ctrl_t {
 	struct camera_io_master io_master_info;
 	struct cam_hw_soc_info soc_info;
 	struct mutex actuator_mutex;
-	uint32_t id;
 	enum msm_actuator_state_t act_apply_state;
 	struct msm_camera_gpio_num_info *gpio_num_info;
 	uint8_t cam_pinctrl_status;
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_core.c
index 1a3f947..57d576a 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_core.c
@@ -18,11 +18,12 @@
 #include <soc/qcom/scm.h>
 #include <cam_mem_mgr.h>
 
-static int cam_csiphy_mem_dmp_param;
-module_param(cam_csiphy_mem_dmp_param, int, 0644);
 #define SCM_SVC_CAMERASS 0x18
 #define SECURE_SYSCALL_ID 0x6
 
+static int csiphy_dump;
+module_param(csiphy_dump, int, 0644);
+
 static int cam_csiphy_notify_secure_mode(int phy, bool protect)
 {
 	struct scm_desc desc = {0};
@@ -572,7 +573,7 @@ int32_t cam_csiphy_core_cfg(void *phy_dev,
 			goto release_mutex;
 		}
 		rc = cam_csiphy_config_dev(csiphy_dev);
-		if (cam_csiphy_mem_dmp_param == 1)
+		if (csiphy_dump == 1)
 			cam_csiphy_mem_dmp(&csiphy_dev->soc_info);
 
 		if (rc < 0) {
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_soc.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_soc.c
index 506fc0e..d2a8467 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_soc.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_soc.c
@@ -14,6 +14,47 @@
 #include "cam_csiphy_core.h"
 #include "include/cam_csiphy_1_0_hwreg.h"
 
+#define BYTES_PER_REGISTER           4
+#define NUM_REGISTER_PER_LINE        4
+#define REG_OFFSET(__start, __i)    ((__start) + ((__i) * BYTES_PER_REGISTER))
+
+static int cam_io_phy_dump(void __iomem *base_addr,
+	uint32_t start_offset, int size)
+{
+	char          line_str[128];
+	char         *p_str;
+	int           i;
+	uint32_t      data;
+
+	CAM_INFO(CAM_CSIPHY, "addr=%pK offset=0x%x size=%d",
+		base_addr, start_offset, size);
+
+	if (!base_addr || (size <= 0))
+		return -EINVAL;
+
+	line_str[0] = '\0';
+	p_str = line_str;
+	for (i = 0; i < size; i++) {
+		if (i % NUM_REGISTER_PER_LINE == 0) {
+			snprintf(p_str, 12, "0x%08x: ",
+				REG_OFFSET(start_offset, i));
+			p_str += 11;
+		}
+		data = readl_relaxed(base_addr + REG_OFFSET(start_offset, i));
+		snprintf(p_str, 9, "%08x ", data);
+		p_str += 8;
+		if ((i + 1) % NUM_REGISTER_PER_LINE == 0) {
+			CAM_ERR(CAM_CSIPHY, "%s", line_str);
+			line_str[0] = '\0';
+			p_str = line_str;
+		}
+	}
+	if (line_str[0] != '\0')
+		CAM_ERR(CAM_CSIPHY, "%s", line_str);
+
+	return 0;
+}
+
 int32_t cam_csiphy_mem_dmp(struct cam_hw_soc_info *soc_info)
 {
 	int32_t rc = 0;
@@ -27,7 +68,7 @@ int32_t cam_csiphy_mem_dmp(struct cam_hw_soc_info *soc_info)
 	}
 	addr = soc_info->reg_map[0].mem_base;
 	size = resource_size(soc_info->mem_block[0]);
-	rc = cam_io_dump(addr, 0, (size >> 2));
+	rc = cam_io_phy_dump(addr, 0, (size >> 2));
 	if (rc < 0) {
 		CAM_ERR(CAM_CSIPHY, "generating dump failed %d", rc);
 		return rc;
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_core.c
index 6eab59a..a173954 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_core.c
@@ -754,7 +754,7 @@ int32_t cam_eeprom_driver_cmd(struct cam_eeprom_ctrl_t *e_ctrl, void *arg)
 	mutex_lock(&(e_ctrl->eeprom_mutex));
 	switch (cmd->op_code) {
 	case CAM_QUERY_CAP:
-		eeprom_cap.slot_info = e_ctrl->subdev_id;
+		eeprom_cap.slot_info = e_ctrl->soc_info.index;
 		if (e_ctrl->userspace_probe == false)
 			eeprom_cap.eeprom_kernel_probe = true;
 		else
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_dev.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_dev.h
index a98bf00..e06e22a 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_dev.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_dev.h
@@ -154,7 +154,6 @@ struct cam_eeprom_intf_params {
  * @cci_i2c_master  :   I2C structure
  * @v4l2_dev_str    :   V4L2 device structure
  * @bridge_intf     :   bridge interface params
- * @subdev_id       :   subdev id
  * @userspace_probe :   flag indicates userspace or kernel probe
  * @cal_data        :   Calibration data
  * @device_name     :   Device name
@@ -171,7 +170,6 @@ struct cam_eeprom_ctrl_t {
 	struct cam_subdev v4l2_dev_str;
 	struct cam_eeprom_intf_params bridge_intf;
 	enum msm_camera_device_type_t eeprom_device_type;
-	uint32_t subdev_id;
 	bool userspace_probe;
 	struct cam_eeprom_memory_block_t cal_data;
 	char device_name[20];
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.c
index f455715..b0fe293 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.c
@@ -634,6 +634,13 @@ int cam_flash_parser(struct cam_flash_ctrl *fctrl, void *arg)
 		add_req.link_hdl = fctrl->bridge_intf.link_hdl;
 		add_req.req_id = csl_packet->header.request_id;
 		add_req.dev_hdl = fctrl->bridge_intf.device_hdl;
+
+		if ((csl_packet->header.op_code & 0xFFFFF) ==
+			CAM_FLASH_PACKET_OPCODE_SET_OPS)
+			add_req.skip_before_applying = 1;
+		else
+			add_req.skip_before_applying = 0;
+
 		if (fctrl->bridge_intf.crm_cb &&
 			fctrl->bridge_intf.crm_cb->add_req)
 			fctrl->bridge_intf.crm_cb->add_req(&add_req);
@@ -648,7 +655,7 @@ int cam_flash_publish_dev_info(struct cam_req_mgr_device_info *info)
 	info->dev_id = CAM_REQ_MGR_DEVICE_FLASH;
 	strlcpy(info->name, CAM_FLASH_NAME, sizeof(info->name));
 	info->p_delay = CAM_FLASH_PIPELINE_DELAY;
-	info->trigger = CAM_TRIGGER_POINT_EOF;
+	info->trigger = CAM_TRIGGER_POINT_SOF;
 	return 0;
 }
 
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_core.c
index 72cabf1..0a283ab 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_core.c
@@ -496,7 +496,7 @@ int cam_ois_driver_cmd(struct cam_ois_ctrl_t *o_ctrl, void *arg)
 	mutex_lock(&(o_ctrl->ois_mutex));
 	switch (cmd->op_code) {
 	case CAM_QUERY_CAP:
-		ois_cap.slot_info = o_ctrl->subdev_id;
+		ois_cap.slot_info = o_ctrl->soc_info.index;
 
 		if (copy_to_user((void __user *) cmd->handle,
 			&ois_cap,
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_dev.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_dev.h
index 95ebed1..794b65a 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_dev.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_dev.h
@@ -95,7 +95,6 @@ struct cam_ois_intf_params {
  * @i2c_mode_data   :   ois i2c mode settings
  * @i2c_calib_data  :   ois i2c calib settings
  * @ois_device_type :   ois device type
- * @subdev_id       :   subdev id
  * @ois_name        :   ois name
  * @ois_fw_flag     :   flag for firmware download
  * @is_ois_calib    :   flag for Calibration data
@@ -116,7 +115,6 @@ struct cam_ois_ctrl_t {
 	struct i2c_settings_array i2c_calib_data;
 	struct i2c_settings_array i2c_mode_data;
 	enum msm_camera_device_type_t ois_device_type;
-	uint32_t subdev_id;
 	char device_name[20];
 	char ois_name[32];
 	uint8_t ois_fw_flag;
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_soc.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_soc.c
index 916e699..5886413 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_soc.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_soc.c
@@ -90,13 +90,6 @@ int cam_ois_driver_soc_init(struct cam_ois_ctrl_t *o_ctrl)
 		return -EINVAL;
 	}
 
-	rc = of_property_read_u32(of_node, "cell-index",
-		&o_ctrl->subdev_id);
-	if (rc < 0) {
-		CAM_ERR(CAM_OIS, "failed rc %d", rc);
-		return rc;
-	}
-
 	if (o_ctrl->ois_device_type == MSM_CAMERA_PLATFORM_DEVICE) {
 		rc = of_property_read_u32(of_node, "cci-master",
 			&o_ctrl->cci_i2c_master);
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c
index f0c1bca..cd96129 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c
@@ -118,6 +118,7 @@ static int32_t cam_sensor_i2c_pkt_parse(struct cam_sensor_ctrl_t *s_ctrl,
 		CAM_DBG(CAM_SENSOR, " Rxed Req Id: %lld",
 			 csl_packet->header.request_id);
 		add_req.dev_hdl = s_ctrl->bridge_intf.device_hdl;
+		add_req.skip_before_applying = 0;
 		if (s_ctrl->bridge_intf.crm_cb &&
 			s_ctrl->bridge_intf.crm_cb->add_req)
 			s_ctrl->bridge_intf.crm_cb->add_req(&add_req);
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.c
index 6e169cf..8eb04ec 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.c
@@ -173,6 +173,9 @@ int32_t camera_io_release(struct camera_io_master *io_master_info)
 	if (io_master_info->master_type == CCI_MASTER) {
 		return cam_sensor_cci_i2c_util(io_master_info->cci_client,
 			MSM_CCI_RELEASE);
+	} else if ((io_master_info->master_type == I2C_MASTER) ||
+			(io_master_info->master_type == SPI_MASTER)) {
+		return 0;
 	} else {
 		CAM_ERR(CAM_SENSOR, "Invalid Comm. Master:%d",
 			io_master_info->master_type);
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_packet_util.c b/drivers/media/platform/msm/camera/cam_utils/cam_packet_util.c
index 44d5d48..aecce12 100644
--- a/drivers/media/platform/msm/camera/cam_utils/cam_packet_util.c
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_packet_util.c
@@ -17,7 +17,7 @@
 #include "cam_packet_util.h"
 #include "cam_debug_util.h"
 
-static int cam_packet_util_get_cmd_mem_addr(int handle, uint32_t **buf_addr,
+int cam_packet_util_get_cmd_mem_addr(int handle, uint32_t **buf_addr,
 	size_t *len)
 {
 	int rc = 0;
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_packet_util.h b/drivers/media/platform/msm/camera/cam_utils/cam_packet_util.h
index 323a75a..94d2693 100644
--- a/drivers/media/platform/msm/camera/cam_utils/cam_packet_util.h
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_packet_util.h
@@ -38,6 +38,21 @@ typedef int (*cam_packet_generic_blob_handler)(void *user_data,
 	uint32_t blob_type, uint32_t blob_size, uint8_t *blob_data);
 
 /**
+ * cam_packet_util_get_cmd_mem_addr()
+ *
+ * @brief                  Get command buffer address
+ *
+ * @handle:                Command buffer memory handle
+ * @buf_addr:              Command buffer cpu mapped address
+ * @len:                   Command buffer length
+ *
+ * @return:                0 for success
+ *                         -EINVAL for Fail
+ */
+int cam_packet_util_get_cmd_mem_addr(int handle, uint32_t **buf_addr,
+	size_t *len);
+
+/**
  * cam_packet_util_validate_packet()
  *
  * @brief                  Validate the packet
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
index d749384..a54f028 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
@@ -2394,6 +2394,10 @@ static int sde_rotator_config_session(struct sde_rot_mgr *mgr,
 		config->input.format, config->output.width,
 		config->output.height, config->output.format,
 		config->frame_rate, perf->clk_rate, perf->bw);
+	SDEROT_EVTLOG(config->session_id, config->input.width,
+			config->input.height, config->input.format,
+			config->output.width, config->output.height,
+			config->output.format, config->frame_rate);
 done:
 	return ret;
 }
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c
index 76c9367..f81cd2f 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c
@@ -2184,6 +2184,7 @@ static int sde_rotator_qbuf(struct file *file, void *fh,
 	if (ret < 0)
 		SDEDEV_ERR(ctx->rot_dev->dev, "fail qbuf s:%d t:%d r:%d\n",
 				ctx->session_id, buf->type, ret);
+	SDEROT_EVTLOG(buf->type, buf->bytesused, buf->length, buf->m.fd, ret);
 
 	return ret;
 }
@@ -2366,6 +2367,7 @@ static int sde_rotator_cropcap(struct file *file, void *fh,
 	a->pixelaspect.numerator = 1;
 	a->pixelaspect.denominator = 1;
 
+	SDEROT_EVTLOG(format->fmt.pix.width, format->fmt.pix.height, a->type);
 	return 0;
 }
 
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
index 3d39fa2..3206844 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
@@ -37,6 +37,7 @@
 #include "sde_rotator_debug.h"
 
 #define RES_UHD              (3840*2160)
+#define MS_TO_US(t) ((t) * USEC_PER_MSEC)
 
 /* traffic shaping clock ticks = finish_time x 19.2MHz */
 #define TRAFFIC_SHAPE_CLKTICK_14MS   268800
@@ -48,7 +49,7 @@
 #define XIN_WRITEBACK		1
 
 /* wait for at most 2 vsync for lowest refresh rate (24hz) */
-#define KOFF_TIMEOUT		(42 * 32)
+#define KOFF_TIMEOUT		(42 * 8)
 
 /* default stream buffer headroom in lines */
 #define DEFAULT_SBUF_HEADROOM	20
@@ -65,35 +66,54 @@
 	do { \
 		SDEROT_DBG("SDEREG.W:[%s:0x%X] <= 0x%X\n", #off, (off),\
 				(u32)(data));\
-		*p++ = REGDMA_OP_REGWRITE | \
-			((off) & REGDMA_ADDR_OFFSET_MASK); \
-		*p++ = (data); \
+		writel_relaxed_no_log( \
+				(REGDMA_OP_REGWRITE | \
+				 ((off) & REGDMA_ADDR_OFFSET_MASK)), \
+				p); \
+		p += sizeof(u32); \
+		writel_relaxed_no_log(data, p); \
+		p += sizeof(u32); \
 	} while (0)
 
 #define SDE_REGDMA_MODIFY(p, off, mask, data) \
 	do { \
 		SDEROT_DBG("SDEREG.M:[%s:0x%X] <= 0x%X\n", #off, (off),\
 				(u32)(data));\
-		*p++ = REGDMA_OP_REGMODIFY | \
-			((off) & REGDMA_ADDR_OFFSET_MASK); \
-		*p++ = (mask); \
-		*p++ = (data); \
+		writel_relaxed_no_log( \
+				(REGDMA_OP_REGMODIFY | \
+				 ((off) & REGDMA_ADDR_OFFSET_MASK)), \
+				p); \
+		p += sizeof(u32); \
+		writel_relaxed_no_log(mask, p); \
+		p += sizeof(u32); \
+		writel_relaxed_no_log(data, p); \
+		p += sizeof(u32); \
 	} while (0)
 
 #define SDE_REGDMA_BLKWRITE_INC(p, off, len) \
 	do { \
 		SDEROT_DBG("SDEREG.B:[%s:0x%X:0x%X]\n", #off, (off),\
 				(u32)(len));\
-		*p++ = REGDMA_OP_BLKWRITE_INC | \
-			((off) & REGDMA_ADDR_OFFSET_MASK); \
-		*p++ = (len); \
+		writel_relaxed_no_log( \
+				(REGDMA_OP_BLKWRITE_INC | \
+				 ((off) & REGDMA_ADDR_OFFSET_MASK)), \
+				p); \
+		p += sizeof(u32); \
+		writel_relaxed_no_log(len, p); \
+		p += sizeof(u32); \
 	} while (0)
 
 #define SDE_REGDMA_BLKWRITE_DATA(p, data) \
 	do { \
 		SDEROT_DBG("SDEREG.I:[:] <= 0x%X\n", (u32)(data));\
-		*(p) = (data); \
-		(p)++; \
+		writel_relaxed_no_log(data, p); \
+		p += sizeof(u32); \
+	} while (0)
+
+#define SDE_REGDMA_READ(p, data) \
+	do { \
+		data = readl_relaxed_no_log(p); \
+		p += sizeof(u32); \
 	} while (0)
 
 /* Macro for directly accessing mapped registers */
@@ -647,12 +667,99 @@ static void sde_hw_rotator_disable_irq(struct sde_hw_rotator *rot)
 }
 
 /**
+ * sde_hw_rotator_reset - Reset rotator hardware
+ * @rot: pointer to hw rotator
+ * @ctx: pointer to current rotator context during the hw hang
+ */
+static int sde_hw_rotator_reset(struct sde_hw_rotator *rot,
+		struct sde_hw_rotator_context *ctx)
+{
+	struct sde_hw_rotator_context *rctx = NULL;
+	u32 int_mask = (REGDMA_INT_0_MASK | REGDMA_INT_1_MASK |
+			REGDMA_INT_2_MASK);
+	u32 last_ts[ROT_QUEUE_MAX] = {0,};
+	u32 latest_ts;
+	int elapsed_time, t;
+	int i, j;
+	unsigned long flags;
+
+	if (!rot || !ctx) {
+		SDEROT_ERR("NULL rotator context\n");
+		return -EINVAL;
+	}
+
+	if (ctx->q_id >= ROT_QUEUE_MAX) {
+		SDEROT_ERR("context q_id out of range: %d\n", ctx->q_id);
+		return -EINVAL;
+	}
+
+	/* sw reset the hw rotator */
+	SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_SW_RESET_OVERRIDE, 1);
+	usleep_range(MS_TO_US(10), MS_TO_US(20));
+	SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_SW_RESET_OVERRIDE, 0);
+
+	spin_lock_irqsave(&rot->rotisr_lock, flags);
+
+	/* update timestamp register with current context */
+	last_ts[ctx->q_id] = ctx->timestamp;
+	sde_hw_rotator_update_swts(rot, ctx, ctx->timestamp);
+	SDEROT_EVTLOG(ctx->timestamp);
+
+	/*
+	 * Search for any pending rot session, and look for last timestamp
+	 * per hw queue.
+	 */
+	for (i = 0; i < ROT_QUEUE_MAX; i++) {
+		latest_ts = atomic_read(&rot->timestamp[i]);
+		latest_ts &= SDE_REGDMA_SWTS_MASK;
+		elapsed_time = sde_hw_rotator_elapsed_swts(latest_ts,
+			last_ts[i]);
+
+		for (j = 0; j < SDE_HW_ROT_REGDMA_TOTAL_CTX; j++) {
+			rctx = rot->rotCtx[i][j];
+			if (rctx && rctx != ctx) {
+				rctx->last_regdma_isr_status = int_mask;
+				rctx->last_regdma_timestamp  = rctx->timestamp;
+
+				t = sde_hw_rotator_elapsed_swts(latest_ts,
+							rctx->timestamp);
+				if (t < elapsed_time) {
+					elapsed_time = t;
+					last_ts[i] = rctx->timestamp;
+					sde_hw_rotator_update_swts(rot, rctx,
+							last_ts[i]);
+				}
+
+				SDEROT_DBG("rotctx[%d][%d], ts:%d\n",
+						i, j, rctx->timestamp);
+				SDEROT_EVTLOG(i, j, rctx->timestamp,
+						last_ts[i]);
+			}
+		}
+	}
+
+	/* Finally wakeup all pending rotator context in queue */
+	for (i = 0; i < ROT_QUEUE_MAX; i++) {
+		for (j = 0; j < SDE_HW_ROT_REGDMA_TOTAL_CTX; j++) {
+			rctx = rot->rotCtx[i][j];
+			if (rctx && rctx != ctx)
+				wake_up_all(&rctx->regdma_waitq);
+		}
+	}
+
+	spin_unlock_irqrestore(&rot->rotisr_lock, flags);
+
+	return 0;
+}
+
+/**
  * sde_hw_rotator_dump_status - Dump hw rotator status on error
  * @rot: Pointer to hw rotator
  */
-static void sde_hw_rotator_dump_status(struct sde_hw_rotator *rot)
+static void sde_hw_rotator_dump_status(struct sde_hw_rotator *rot, u32 *ubwcerr)
 {
 	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
+	u32 reg = 0;
 
 	SDEROT_ERR(
 		"op_mode = %x, int_en = %x, int_status = %x\n",
@@ -681,9 +788,11 @@ static void sde_hw_rotator_dump_status(struct sde_hw_rotator *rot)
 		SDE_ROTREG_READ(rot->mdss_base,
 			REGDMA_CSR_REGDMA_FSM_STATE));
 
+	reg = SDE_ROTREG_READ(rot->mdss_base, ROT_SSPP_UBWC_ERROR_STATUS);
+	if (ubwcerr)
+		*ubwcerr = reg;
 	SDEROT_ERR(
-		"UBWC decode status = %x, UBWC encode status = %x\n",
-		SDE_ROTREG_READ(rot->mdss_base, ROT_SSPP_UBWC_ERROR_STATUS),
+		"UBWC decode status = %x, UBWC encode status = %x\n", reg,
 		SDE_ROTREG_READ(rot->mdss_base, ROT_WB_UBWC_ERROR_STATUS));
 
 	SDEROT_ERR("VBIF XIN HALT status = %x VBIF AXI HALT status = %x\n",
@@ -851,7 +960,7 @@ static void sde_hw_rotator_vbif_setting(struct sde_hw_rotator *rot)
 static void sde_hw_rotator_setup_timestamp_packet(
 		struct sde_hw_rotator_context *ctx, u32 mask, u32 swts)
 {
-	u32 *wrptr;
+	char __iomem *wrptr;
 
 	wrptr = sde_hw_rotator_get_regdma_segment(ctx);
 
@@ -908,7 +1017,7 @@ static void sde_hw_rotator_cdp_configs(struct sde_hw_rotator_context *ctx,
 		struct sde_rot_cdp_params *params)
 {
 	int reg_val;
-	u32 *wrptr = sde_hw_rotator_get_regdma_segment(ctx);
+	char __iomem *wrptr = sde_hw_rotator_get_regdma_segment(ctx);
 
 	if (!params->enable) {
 		SDE_REGDMA_WRITE(wrptr, params->offset, 0x0);
@@ -942,7 +1051,7 @@ static void sde_hw_rotator_cdp_configs(struct sde_hw_rotator_context *ctx,
 static void sde_hw_rotator_setup_qos_lut_wr(struct sde_hw_rotator_context *ctx)
 {
 	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
-	u32 *wrptr = sde_hw_rotator_get_regdma_segment(ctx);
+	char __iomem *wrptr = sde_hw_rotator_get_regdma_segment(ctx);
 
 	/* Offline rotation setting */
 	if (!ctx->sbuf_mode) {
@@ -1000,7 +1109,7 @@ static void sde_hw_rotator_setup_qos_lut_wr(struct sde_hw_rotator_context *ctx)
 static void sde_hw_rotator_setup_qos_lut_rd(struct sde_hw_rotator_context *ctx)
 {
 	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
-	u32 *wrptr = sde_hw_rotator_get_regdma_segment(ctx);
+	char __iomem *wrptr = sde_hw_rotator_get_regdma_segment(ctx);
 
 	/* Offline rotation setting */
 	if (!ctx->sbuf_mode) {
@@ -1070,7 +1179,7 @@ static void sde_hw_rotator_setup_fetchengine(struct sde_hw_rotator_context *ctx,
 	struct sde_mdp_data *data;
 	struct sde_rot_cdp_params cdp_params = {0};
 	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
-	u32 *wrptr;
+	char __iomem *wrptr;
 	u32 opmode = 0;
 	u32 chroma_samp = 0;
 	u32 src_format = 0;
@@ -1291,7 +1400,7 @@ static void sde_hw_rotator_setup_wbengine(struct sde_hw_rotator_context *ctx,
 	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
 	struct sde_mdp_format_params *fmt;
 	struct sde_rot_cdp_params cdp_params = {0};
-	u32 *wrptr;
+	char __iomem *wrptr;
 	u32 pack = 0;
 	u32 dst_format = 0;
 	u32 no_partial_writes = 0;
@@ -1488,13 +1597,18 @@ static u32 sde_hw_rotator_start_no_regdma(struct sde_hw_rotator_context *ctx,
 		enum sde_rot_queue_prio queue_id)
 {
 	struct sde_hw_rotator *rot = ctx->rot;
-	u32 *wrptr;
-	u32 *rdptr;
-	u8 *addr;
+	char __iomem *wrptr;
+	char __iomem *mem_rdptr;
+	char __iomem *addr;
 	u32 mask;
+	u32 cmd0, cmd1, cmd2;
 	u32 blksize;
 
-	rdptr = sde_hw_rotator_get_regdma_segment_base(ctx);
+	/*
+	 * when regdma is not using, the regdma segment is just a normal
+	 * DRAM, and not an iomem.
+	 */
+	mem_rdptr = sde_hw_rotator_get_regdma_segment_base(ctx);
 	wrptr = sde_hw_rotator_get_regdma_segment(ctx);
 
 	if (rot->irq_num >= 0) {
@@ -1512,54 +1626,65 @@ static u32 sde_hw_rotator_start_no_regdma(struct sde_hw_rotator_context *ctx,
 	SDEROT_DBG("BEGIN %d\n", ctx->timestamp);
 	/* Write all command stream to Rotator blocks */
 	/* Rotator will start right away after command stream finish writing */
-	while (rdptr < wrptr) {
-		u32 op = REGDMA_OP_MASK & *rdptr;
+	while (mem_rdptr < wrptr) {
+		u32 op = REGDMA_OP_MASK & readl_relaxed_no_log(mem_rdptr);
 
 		switch (op) {
 		case REGDMA_OP_NOP:
 			SDEROT_DBG("NOP\n");
-			rdptr++;
+			mem_rdptr += sizeof(u32);
 			break;
 		case REGDMA_OP_REGWRITE:
+			SDE_REGDMA_READ(mem_rdptr, cmd0);
+			SDE_REGDMA_READ(mem_rdptr, cmd1);
 			SDEROT_DBG("REGW %6.6x %8.8x\n",
-					rdptr[0] & REGDMA_ADDR_OFFSET_MASK,
-					rdptr[1]);
+					cmd0 & REGDMA_ADDR_OFFSET_MASK,
+					cmd1);
 			addr =  rot->mdss_base +
-				(*rdptr++ & REGDMA_ADDR_OFFSET_MASK);
-			writel_relaxed(*rdptr++, addr);
+				(cmd0 & REGDMA_ADDR_OFFSET_MASK);
+			writel_relaxed(cmd1, addr);
 			break;
 		case REGDMA_OP_REGMODIFY:
+			SDE_REGDMA_READ(mem_rdptr, cmd0);
+			SDE_REGDMA_READ(mem_rdptr, cmd1);
+			SDE_REGDMA_READ(mem_rdptr, cmd2);
 			SDEROT_DBG("REGM %6.6x %8.8x %8.8x\n",
-					rdptr[0] & REGDMA_ADDR_OFFSET_MASK,
-					rdptr[1], rdptr[2]);
+					cmd0 & REGDMA_ADDR_OFFSET_MASK,
+					cmd1, cmd2);
 			addr =  rot->mdss_base +
-				(*rdptr++ & REGDMA_ADDR_OFFSET_MASK);
-			mask = *rdptr++;
-			writel_relaxed((readl_relaxed(addr) & mask) | *rdptr++,
+				(cmd0 & REGDMA_ADDR_OFFSET_MASK);
+			mask = cmd1;
+			writel_relaxed((readl_relaxed(addr) & mask) | cmd2,
 					addr);
 			break;
 		case REGDMA_OP_BLKWRITE_SINGLE:
+			SDE_REGDMA_READ(mem_rdptr, cmd0);
+			SDE_REGDMA_READ(mem_rdptr, cmd1);
 			SDEROT_DBG("BLKWS %6.6x %6.6x\n",
-					rdptr[0] & REGDMA_ADDR_OFFSET_MASK,
-					rdptr[1]);
+					cmd0 & REGDMA_ADDR_OFFSET_MASK,
+					cmd1);
 			addr =  rot->mdss_base +
-				(*rdptr++ & REGDMA_ADDR_OFFSET_MASK);
-			blksize = *rdptr++;
+				(cmd0 & REGDMA_ADDR_OFFSET_MASK);
+			blksize = cmd1;
 			while (blksize--) {
-				SDEROT_DBG("DATA %8.8x\n", rdptr[0]);
-				writel_relaxed(*rdptr++, addr);
+				SDE_REGDMA_READ(mem_rdptr, cmd0);
+				SDEROT_DBG("DATA %8.8x\n", cmd0);
+				writel_relaxed(cmd0, addr);
 			}
 			break;
 		case REGDMA_OP_BLKWRITE_INC:
+			SDE_REGDMA_READ(mem_rdptr, cmd0);
+			SDE_REGDMA_READ(mem_rdptr, cmd1);
 			SDEROT_DBG("BLKWI %6.6x %6.6x\n",
-					rdptr[0] & REGDMA_ADDR_OFFSET_MASK,
-					rdptr[1]);
+					cmd0 & REGDMA_ADDR_OFFSET_MASK,
+					cmd1);
 			addr =  rot->mdss_base +
-				(*rdptr++ & REGDMA_ADDR_OFFSET_MASK);
-			blksize = *rdptr++;
+				(cmd0 & REGDMA_ADDR_OFFSET_MASK);
+			blksize = cmd1;
 			while (blksize--) {
-				SDEROT_DBG("DATA %8.8x\n", rdptr[0]);
-				writel_relaxed(*rdptr++, addr);
+				SDE_REGDMA_READ(mem_rdptr, cmd0);
+				SDEROT_DBG("DATA %8.8x\n", cmd0);
+				writel_relaxed(cmd0, addr);
 				addr += 4;
 			}
 			break;
@@ -1568,7 +1693,7 @@ static u32 sde_hw_rotator_start_no_regdma(struct sde_hw_rotator_context *ctx,
 			 * Skip data for now for unregonized OP mode
 			 */
 			SDEROT_DBG("UNDEFINED\n");
-			rdptr++;
+			mem_rdptr += sizeof(u32);
 			break;
 		}
 	}
@@ -1586,11 +1711,11 @@ static u32 sde_hw_rotator_start_regdma(struct sde_hw_rotator_context *ctx,
 		enum sde_rot_queue_prio queue_id)
 {
 	struct sde_hw_rotator *rot = ctx->rot;
-	u32 *wrptr;
+	char __iomem *wrptr;
 	u32  regdmaSlot;
 	u32  offset;
-	long length;
-	long ts_length;
+	u32  length;
+	u32  ts_length;
 	u32  enableInt;
 	u32  swts = 0;
 	u32  mask = 0;
@@ -1609,15 +1734,15 @@ static u32 sde_hw_rotator_start_regdma(struct sde_hw_rotator_context *ctx,
 	 * Start REGDMA with command offset and size
 	 */
 	regdmaSlot = sde_hw_rotator_get_regdma_ctxidx(ctx);
-	length = ((long)wrptr - (long)ctx->regdma_base) / 4;
-	offset = (u32)(ctx->regdma_base - (u32 *)(rot->mdss_base +
-				REGDMA_RAM_REGDMA_CMD_RAM));
+	length = (wrptr - ctx->regdma_base) / 4;
+	offset = (ctx->regdma_base - (rot->mdss_base +
+				REGDMA_RAM_REGDMA_CMD_RAM)) / sizeof(u32);
 	enableInt = ((ctx->timestamp & 1) + 1) << 30;
 	trig_sel = ctx->sbuf_mode ? REGDMA_CMD_TRIG_SEL_MDP_FLUSH :
 			REGDMA_CMD_TRIG_SEL_SW_START;
 
 	SDEROT_DBG(
-		"regdma(%d)[%d] <== INT:0x%X|length:%ld|offset:0x%X, ts:%X\n",
+		"regdma(%d)[%d] <== INT:0x%X|length:%d|offset:0x%X, ts:%X\n",
 		queue_id, regdmaSlot, enableInt, length, offset,
 		ctx->timestamp);
 
@@ -1647,6 +1772,7 @@ static u32 sde_hw_rotator_start_regdma(struct sde_hw_rotator_context *ctx,
 		sde_hw_rotator_setup_timestamp_packet(ctx, mask, swts);
 		offset += length;
 		ts_length = sde_hw_rotator_get_regdma_segment(ctx) - wrptr;
+		ts_length /= sizeof(u32);
 		WARN_ON((length + ts_length) > SDE_HW_ROT_REGDMA_SEG_SIZE);
 
 		/* ensure command packet is issue before the submit command */
@@ -1752,6 +1878,7 @@ static u32 sde_hw_rotator_wait_done_regdma(
 	u32 int_id;
 	u32 swts;
 	u32 sts = 0;
+	u32 ubwcerr = 0;
 	unsigned long flags;
 
 	if (rot->irq_num >= 0) {
@@ -1788,8 +1915,26 @@ static u32 sde_hw_rotator_wait_done_regdma(
 			else if (status & REGDMA_INVALID_CMD)
 				SDEROT_ERR("REGDMA invalid command\n");
 
-			sde_hw_rotator_dump_status(rot);
-			status = ROT_ERROR_BIT;
+			sde_hw_rotator_dump_status(rot, &ubwcerr);
+
+			if (ubwcerr) {
+				/*
+				 * Perform recovery for ROT SSPP UBWC decode
+				 * error.
+				 * - SW reset rotator hw block
+				 * - reset TS logic so all pending rotation
+				 *   in hw queue got done signalled
+				 */
+				spin_unlock_irqrestore(&rot->rotisr_lock,
+						flags);
+				if (!sde_hw_rotator_reset(rot, ctx))
+					status = REGDMA_INCOMPLETE_CMD;
+				else
+					status = ROT_ERROR_BIT;
+				spin_lock_irqsave(&rot->rotisr_lock, flags);
+			} else {
+				status = ROT_ERROR_BIT;
+			}
 		} else {
 			if (rc == 1)
 				SDEROT_WARN(
@@ -1815,12 +1960,12 @@ static u32 sde_hw_rotator_wait_done_regdma(
 		if (last_isr & REGDMA_INT_ERR_MASK) {
 			SDEROT_ERR("Rotator error, ts:0x%X/0x%X status:%x\n",
 				ctx->timestamp, swts, last_isr);
-			sde_hw_rotator_dump_status(rot);
+			sde_hw_rotator_dump_status(rot, NULL);
 			status = ROT_ERROR_BIT;
 		} else if (pending) {
 			SDEROT_ERR("Rotator timeout, ts:0x%X/0x%X status:%x\n",
 				ctx->timestamp, swts, last_isr);
-			sde_hw_rotator_dump_status(rot);
+			sde_hw_rotator_dump_status(rot, NULL);
 			status = ROT_ERROR_BIT;
 		} else {
 			status = 0;
@@ -1830,7 +1975,7 @@ static u32 sde_hw_rotator_wait_done_regdma(
 				last_isr);
 	}
 
-	sts = (status & ROT_ERROR_BIT) ? -ENODEV : 0;
+	sts = (status & (ROT_ERROR_BIT | REGDMA_INCOMPLETE_CMD)) ? -ENODEV : 0;
 
 	if (status & ROT_ERROR_BIT)
 		SDEROT_EVTLOG_TOUT_HANDLER("rot", "rot_dbg_bus",
@@ -2323,7 +2468,7 @@ static int sde_hw_rotator_config(struct sde_rot_hw_resource *hw,
 				if (status & BIT(0)) {
 					SDEROT_ERR("rotator busy 0x%x\n",
 							status);
-					sde_hw_rotator_dump_status(rot);
+					sde_hw_rotator_dump_status(rot, NULL);
 					SDEROT_EVTLOG_TOUT_HANDLER("rot",
 							"vbif_dbg_bus",
 							"panic");
@@ -3435,21 +3580,22 @@ int sde_rotator_r3_init(struct sde_rot_mgr *mgr)
 	/* REGDMA initialization */
 	if (rot->mode == ROT_REGDMA_OFF) {
 		for (i = 0; i < SDE_HW_ROT_REGDMA_TOTAL_CTX; i++)
-			rot->cmd_wr_ptr[0][i] = &rot->cmd_queue[
-				SDE_HW_ROT_REGDMA_SEG_SIZE * i];
+			rot->cmd_wr_ptr[0][i] = (char __iomem *)(
+					&rot->cmd_queue[
+					SDE_HW_ROT_REGDMA_SEG_SIZE * i]);
 	} else {
 		for (i = 0; i < SDE_HW_ROT_REGDMA_TOTAL_CTX; i++)
 			rot->cmd_wr_ptr[ROT_QUEUE_HIGH_PRIORITY][i] =
-				(u32 *)(rot->mdss_base +
+				rot->mdss_base +
 					REGDMA_RAM_REGDMA_CMD_RAM +
-					SDE_HW_ROT_REGDMA_SEG_SIZE * 4 * i);
+					SDE_HW_ROT_REGDMA_SEG_SIZE * 4 * i;
 
 		for (i = 0; i < SDE_HW_ROT_REGDMA_TOTAL_CTX; i++)
 			rot->cmd_wr_ptr[ROT_QUEUE_LOW_PRIORITY][i] =
-				(u32 *)(rot->mdss_base +
+				rot->mdss_base +
 					REGDMA_RAM_REGDMA_CMD_RAM +
 					SDE_HW_ROT_REGDMA_SEG_SIZE * 4 *
-					(i + SDE_HW_ROT_REGDMA_TOTAL_CTX));
+					(i + SDE_HW_ROT_REGDMA_TOTAL_CTX);
 	}
 
 	for (i = 0; i < ROT_QUEUE_MAX; i++) {
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_internal.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_internal.h
index 4c1316c..8b391ea 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_internal.h
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_internal.h
@@ -218,8 +218,8 @@ struct sde_hw_rotator_context {
 	enum   sde_rot_queue_prio q_id;
 	u32    session_id;
 	u32    sequence_id;
-	u32    *regdma_base;
-	u32    *regdma_wrptr;
+	char __iomem *regdma_base;
+	char __iomem *regdma_wrptr;
 	u32    timestamp;
 	struct completion rot_comp;
 	wait_queue_head_t regdma_waitq;
@@ -280,7 +280,7 @@ struct sde_hw_rotator {
 	u32    cmd_queue[SDE_HW_ROT_REGDMA_RAM_SIZE];
 
 	/* Cmd Queue Write Ptr */
-	u32   *cmd_wr_ptr[ROT_QUEUE_MAX][SDE_HW_ROT_REGDMA_TOTAL_CTX];
+	char __iomem *cmd_wr_ptr[ROT_QUEUE_MAX][SDE_HW_ROT_REGDMA_TOTAL_CTX];
 
 	/* Rotator Context */
 	struct sde_hw_rotator_context
@@ -349,7 +349,7 @@ static inline u32 sde_hw_rotator_get_regdma_ctxidx(
  * @ctx: Rotator Context
  * return: base segment address
  */
-static inline u32 *sde_hw_rotator_get_regdma_segment_base(
+static inline char __iomem *sde_hw_rotator_get_regdma_segment_base(
 		struct sde_hw_rotator_context *ctx)
 {
 	SDEROT_DBG("regdma base @slot[%d]: %p\n",
@@ -365,11 +365,11 @@ static inline u32 *sde_hw_rotator_get_regdma_segment_base(
  * @ctx: Rotator Context
  * return: segment address
  */
-static inline u32 *sde_hw_rotator_get_regdma_segment(
+static inline char __iomem *sde_hw_rotator_get_regdma_segment(
 		struct sde_hw_rotator_context *ctx)
 {
 	u32 idx = sde_hw_rotator_get_regdma_ctxidx(ctx);
-	u32 *addr = ctx->regdma_wrptr;
+	char __iomem *addr = ctx->regdma_wrptr;
 
 	SDEROT_DBG("regdma slot[%d] ==> %p\n", idx, addr);
 	return addr;
@@ -383,7 +383,7 @@ static inline u32 *sde_hw_rotator_get_regdma_segment(
  */
 static inline void sde_hw_rotator_put_regdma_segment(
 		struct sde_hw_rotator_context *ctx,
-		u32 *wrptr)
+		char __iomem *wrptr)
 {
 	u32 idx = sde_hw_rotator_get_regdma_ctxidx(ctx);
 
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_util.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_util.c
index 9ef4282..ac4ab54 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_util.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_util.c
@@ -35,6 +35,7 @@
 
 #include "sde_rotator_util.h"
 #include "sde_rotator_smmu.h"
+#include "sde_rotator_debug.h"
 
 #define Y_TILEWIDTH     48
 #define Y_TILEHEIGHT    4
@@ -1038,6 +1039,8 @@ int sde_mdp_data_map(struct sde_mdp_data *data, bool rotator, int dir)
 			break;
 		}
 	}
+	SDEROT_EVTLOG(data->num_planes, dir, data->p[0].addr, data->p[0].len,
+			data->p[0].mapped);
 
 	return rc;
 }
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_clocks.c b/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
index c6d31f7..f2f6d58 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
@@ -708,9 +708,9 @@ int msm_comm_scale_clocks(struct msm_vidc_inst *inst)
 
 	if (inst->clk_data.buffer_counter < DCVS_FTB_WINDOW ||
 		!msm_vidc_clock_scaling)
-		inst->clk_data.curr_freq = msm_vidc_max_freq(inst->core);
+		inst->clk_data.min_freq = msm_vidc_max_freq(inst->core);
 	else
-		inst->clk_data.curr_freq = freq;
+		inst->clk_data.min_freq = freq;
 
 	msm_vidc_set_clocks(inst->core);
 
@@ -1106,6 +1106,16 @@ int msm_vidc_decide_core_and_power_mode(struct msm_vidc_inst *inst)
 	lp_cycles = inst->session_type == MSM_VIDC_ENCODER ?
 			inst->clk_data.entry->low_power_cycles :
 			inst->clk_data.entry->vpp_cycles;
+	/*
+	 * In case there is only 1 core enabled, mark it as the core
+	 * with min load. This ensures that this core is selected and
+	 * video session is set to run on the enabled core.
+	 */
+	if (inst->capability.max_video_cores.max <= VIDC_CORE_ID_1) {
+		min_core_id = min_lp_core_id = VIDC_CORE_ID_1;
+		min_load = core0_load;
+		min_lp_load = core0_lp_load;
+	}
 
 	current_inst_load = msm_comm_get_inst_load(inst, LOAD_CALC_NO_QUIRKS) *
 		inst->clk_data.entry->vpp_cycles;
@@ -1129,7 +1139,8 @@ int msm_vidc_decide_core_and_power_mode(struct msm_vidc_inst *inst)
 		V4L2_CID_MPEG_VIDC_VIDEO_HYBRID_HIERP_MODE);
 
 	/* Try for preferred core based on settings. */
-	if (inst->session_type == MSM_VIDC_ENCODER && hier_mode) {
+	if (inst->session_type == MSM_VIDC_ENCODER && hier_mode &&
+		inst->capability.max_video_cores.max >= VIDC_CORE_ID_3) {
 		if (current_inst_load / 2 + core0_load <= max_freq &&
 			current_inst_load / 2 + core1_load <= max_freq) {
 			if (inst->clk_data.work_mode == VIDC_WORK_MODE_2) {
@@ -1140,7 +1151,8 @@ int msm_vidc_decide_core_and_power_mode(struct msm_vidc_inst *inst)
 		}
 	}
 
-	if (inst->session_type == MSM_VIDC_ENCODER && hier_mode) {
+	if (inst->session_type == MSM_VIDC_ENCODER && hier_mode &&
+		inst->capability.max_video_cores.max >= VIDC_CORE_ID_3) {
 		if (current_inst_lp_load / 2 +
 				core0_lp_load <= max_freq &&
 			current_inst_lp_load / 2 +
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c
index 2beb90c..f8c5798 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c
@@ -1686,6 +1686,11 @@ static void handle_event_change(enum hal_command_response cmd, void *data)
 		event_notify->crop_data.height;
 	inst->prop.crop_info.width =
 		event_notify->crop_data.width;
+	/* HW returns progressive_only flag in pic_struct. */
+	inst->pic_struct =
+		event_notify->pic_struct ?
+		MSM_VIDC_PIC_STRUCT_PROGRESSIVE :
+		MSM_VIDC_PIC_STRUCT_MAYBE_INTERLACED;
 
 	ptr = (u32 *)seq_changed_event.u.data;
 	ptr[0] = event_notify->height;
@@ -3384,7 +3389,7 @@ static int set_output_buffers(struct msm_vidc_inst *inst,
 	enum hal_buffer buffer_type)
 {
 	int rc = 0;
-	struct internal_buf *binfo;
+	struct internal_buf *binfo = NULL;
 	u32 smem_flags = 0, buffer_size;
 	struct hal_buffer_requirements *output_buf, *extradata_buf;
 	int i;
@@ -3493,10 +3498,10 @@ static int set_output_buffers(struct msm_vidc_inst *inst,
 	}
 	return rc;
 fail_set_buffers:
-	kfree(binfo);
-fail_kzalloc:
 	msm_comm_smem_free(inst, &binfo->smem);
 err_no_mem:
+	kfree(binfo);
+fail_kzalloc:
 	return rc;
 }
 
@@ -6329,10 +6334,26 @@ struct msm_vidc_buffer *msm_comm_get_vidc_buffer(struct msm_vidc_inst *inst,
 	}
 
 	mutex_lock(&inst->registeredbufs.lock);
-	list_for_each_entry(mbuf, &inst->registeredbufs.list, list) {
-		if (msm_comm_compare_dma_planes(inst, mbuf, dma_planes)) {
-			found = true;
-			break;
+	if (inst->session_type == MSM_VIDC_DECODER) {
+		list_for_each_entry(mbuf, &inst->registeredbufs.list, list) {
+			if (msm_comm_compare_dma_planes(inst, mbuf,
+					dma_planes)) {
+				found = true;
+				break;
+			}
+		}
+	} else {
+		/*
+		 * For encoder, the client may queue the same buffer with a
+		 * different fd before the driver has returned the old buffer.
+		 * Such a buffer should be treated as new. Search the list by
+		 * fd so that it will be treated as a new msm_vidc_buffer.
+		 */
+		list_for_each_entry(mbuf, &inst->registeredbufs.list, list) {
+			if (msm_comm_compare_vb2_planes(inst, mbuf, vb2)) {
+				found = true;
+				break;
+			}
 		}
 	}
 
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_platform.c b/drivers/media/platform/msm/vidc/msm_vidc_platform.c
index a82b598..56524ccd4 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_platform.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_platform.c
@@ -289,6 +289,7 @@ static const struct of_device_id msm_vidc_dt_match[] = {
 		.compatible = "qcom,sdm670-vidc",
 		.data = &sdm670_data,
 	},
+	{},
 };
 
 MODULE_DEVICE_TABLE(of, msm_vidc_dt_match);
diff --git a/drivers/media/platform/msm/vidc/venus_hfi.c b/drivers/media/platform/msm/vidc/venus_hfi.c
index 5d2ed29..e260886 100644
--- a/drivers/media/platform/msm/vidc/venus_hfi.c
+++ b/drivers/media/platform/msm/vidc/venus_hfi.c
@@ -1693,6 +1693,12 @@ static int venus_hfi_core_init(void *device)
 
 	dev->bus_vote.data =
 		kzalloc(sizeof(struct vidc_bus_vote_data), GFP_KERNEL);
+	if (!dev->bus_vote.data) {
+		dprintk(VIDC_ERR, "Bus vote data memory is not allocated\n");
+		rc = -ENOMEM;
+		goto err_no_mem;
+	}
+
 	dev->bus_vote.data_count = 1;
 	dev->bus_vote.data->power_mode = VIDC_POWER_TURBO;
 
@@ -1769,6 +1775,7 @@ static int venus_hfi_core_init(void *device)
 	__set_state(dev, VENUS_STATE_DEINIT);
 	__unload_fw(dev);
 err_load_fw:
+err_no_mem:
 	dprintk(VIDC_ERR, "Core init failed\n");
 	mutex_unlock(&dev->lock);
 	return rc;
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index af409aa..eb14f0f 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -3908,6 +3908,7 @@ static inline int mmc_blk_cmdq_part_switch(struct mmc_card *card,
 	struct mmc_host *host = card->host;
 	struct mmc_cmdq_context_info *ctx = &host->cmdq_ctx;
 	u8 part_config = card->ext_csd.part_config;
+	int ret = 0, err = 0;
 
 	if ((main_md->part_curr == md->part_type) &&
 	    (card->part_curr == md->part_type))
@@ -3917,40 +3918,70 @@ static inline int mmc_blk_cmdq_part_switch(struct mmc_card *card,
 		 card->ext_csd.cmdq_support &&
 		 (md->flags & MMC_BLK_CMD_QUEUE)));
 
-	if (!test_bit(CMDQ_STATE_HALT, &ctx->curr_state))
-		WARN_ON(mmc_cmdq_halt(host, true));
+	if (!test_bit(CMDQ_STATE_HALT, &ctx->curr_state)) {
+		ret = mmc_cmdq_halt(host, true);
+		if (ret) {
+			pr_err("%s: %s: halt: failed: %d\n",
+				mmc_hostname(host), __func__,  ret);
+			goto out;
+		}
+	}
 
 	/* disable CQ mode in card */
 	if (mmc_card_cmdq(card)) {
-		WARN_ON(mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+		ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
 				 EXT_CSD_CMDQ, 0,
-				  card->ext_csd.generic_cmd6_time));
+				 card->ext_csd.generic_cmd6_time);
+		if (ret) {
+			pr_err("%s: %s: cmdq mode disable failed %d\n",
+				mmc_hostname(host), __func__, ret);
+			goto cmdq_unhalt;
+		}
 		mmc_card_clr_cmdq(card);
 	}
 
 	part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
 	part_config |= md->part_type;
 
-	WARN_ON(mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+	ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
 			 EXT_CSD_PART_CONFIG, part_config,
-			  card->ext_csd.part_time));
+			 card->ext_csd.part_time);
+	if (ret) {
+		pr_err("%s: %s: mmc_switch failure, %d -> %d , err = %d\n",
+			mmc_hostname(host), __func__, main_md->part_curr,
+			md->part_type, ret);
+		goto cmdq_switch;
+	}
 
 	card->ext_csd.part_config = part_config;
 	card->part_curr = md->part_type;
 
 	main_md->part_curr = md->part_type;
 
-	WARN_ON(mmc_blk_cmdq_switch(card, md, true));
-	WARN_ON(mmc_cmdq_halt(host, false));
-
-	return 0;
+cmdq_switch:
+	err = mmc_blk_cmdq_switch(card, md, true);
+	if (err) {
+		pr_err("%s: %s: mmc_blk_cmdq_switch failed: %d\n",
+			mmc_hostname(host), __func__,  err);
+		ret = err;
+	}
+cmdq_unhalt:
+	err = mmc_cmdq_halt(host, false);
+	if (err) {
+		pr_err("%s: %s: unhalt: failed: %d\n",
+			mmc_hostname(host), __func__,  err);
+		ret = err;
+	}
+out:
+	return ret;
 }
 
 static int mmc_blk_cmdq_issue_rq(struct mmc_queue *mq, struct request *req)
 {
-	int ret;
+	int ret, err = 0;
 	struct mmc_blk_data *md = mq->data;
 	struct mmc_card *card = md->queue.card;
+	struct mmc_host *host = card->host;
 
 	mmc_get_card(card);
 
@@ -3971,9 +4002,20 @@ static int mmc_blk_cmdq_issue_rq(struct mmc_queue *mq, struct request *req)
 
 	ret = mmc_blk_cmdq_part_switch(card, md);
 	if (ret) {
-		pr_err("%s: %s: partition switch failed %d\n",
+		pr_err("%s: %s: partition switch failed %d, resetting cmdq\n",
 				md->disk->disk_name, __func__, ret);
-		goto out;
+
+		mmc_blk_cmdq_reset(host, false);
+		err = mmc_blk_cmdq_part_switch(card, md);
+		if (!err) {
+			pr_err("%s: %s: partition switch success err = %d\n",
+				md->disk->disk_name, __func__, err);
+		} else {
+			pr_err("%s: %s: partition switch failed err = %d\n",
+				md->disk->disk_name, __func__, err);
+			ret = err;
+			goto out;
+		}
 	}
 
 	if (req) {
diff --git a/drivers/net/wireless/ath/wil6210/ftm.c b/drivers/net/wireless/ath/wil6210/ftm.c
index f94c894..6de8e0e 100644
--- a/drivers/net/wireless/ath/wil6210/ftm.c
+++ b/drivers/net/wireless/ath/wil6210/ftm.c
@@ -683,6 +683,10 @@ void wil_aoa_evt_meas(struct wil6210_priv *wil,
 	int data_len = len - offsetof(struct wmi_aoa_meas_event, meas_data);
 	struct wil_aoa_meas_result *res;
 
+	if (data_len < 0) {
+		wil_err(wil, "AOA event too short (%d)\n", len);
+		return;
+	}
 	data_len = min_t(int, le16_to_cpu(evt->length), data_len);
 
 	res = kmalloc(sizeof(*res) + data_len, GFP_KERNEL);
diff --git a/drivers/pinctrl/qcom/Makefile b/drivers/pinctrl/qcom/Makefile
index c9f5c11..d92db11 100644
--- a/drivers/pinctrl/qcom/Makefile
+++ b/drivers/pinctrl/qcom/Makefile
@@ -16,6 +16,6 @@
 obj-$(CONFIG_PINCTRL_QCOM_SPMI_PMIC) += pinctrl-spmi-mpp.o
 obj-$(CONFIG_PINCTRL_QCOM_SSBI_PMIC) += pinctrl-ssbi-gpio.o
 obj-$(CONFIG_PINCTRL_QCOM_SSBI_PMIC) += pinctrl-ssbi-mpp.o
-obj-$(CONFIG_PINCTRL_SDM845) += pinctrl-sdm845.o
+obj-$(CONFIG_PINCTRL_SDM845) += pinctrl-sdm845.o pinctrl-sdm845-v2.o
 obj-$(CONFIG_PINCTRL_SDM670) += pinctrl-sdm670.o
 obj-$(CONFIG_PINCTRL_SDXPOORWILLS)	+= pinctrl-sdxpoorwills.o
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index 166935ec..13e9a0d 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -39,7 +39,6 @@
 
 #define MAX_NR_GPIO 300
 #define PS_HOLD_OFFSET 0x820
-#define STATUS_OFFSET 0x10
 
 /**
  * struct msm_pinctrl - state for a pinctrl-msm device
@@ -72,35 +71,6 @@ struct msm_pinctrl {
 	void __iomem *regs;
 };
 
-static u32 msm_pinctrl_find_base(const struct msm_pinctrl *pctrl, u32 gpio_id)
-{
-	int i;
-	u32 val;
-	const struct msm_pinctrl_soc_data *soc_data = pctrl->soc;
-
-	if (gpio_id >= soc_data->ngpios || !soc_data->pin_base)
-		return 0;
-
-	if (soc_data->pin_base[gpio_id])
-		return soc_data->pin_base[gpio_id];
-
-	for (i = 0; i < soc_data->n_tile_offsets; i++) {
-		val = readl_relaxed(pctrl->regs +
-			soc_data->tile_offsets[i] + STATUS_OFFSET
-			+ soc_data->reg_size * gpio_id);
-		if (val) {
-			soc_data->pin_base[gpio_id] = soc_data->tile_offsets[i];
-			return soc_data->tile_offsets[i];
-		}
-	}
-
-	/* In the case that the soc_data does not support dynamic base
-	 * detection, we return 0 here.
-	 */
-	WARN_ONCE(1, "%s:Dynamic base detection is not supported\n", __func__);
-	return 0;
-}
-
 static int msm_get_groups_count(struct pinctrl_dev *pctldev)
 {
 	struct msm_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
@@ -170,11 +140,10 @@ static int msm_pinmux_set_mux(struct pinctrl_dev *pctldev,
 	struct msm_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
 	const struct msm_pingroup *g;
 	unsigned long flags;
-	u32 val, mask, base;
+	u32 val, mask;
 	int i;
 
 	g = &pctrl->soc->groups[group];
-	base = msm_pinctrl_find_base(pctrl, group);
 	mask = GENMASK(g->mux_bit + order_base_2(g->nfuncs) - 1, g->mux_bit);
 
 	for (i = 0; i < g->nfuncs; i++) {
@@ -187,10 +156,10 @@ static int msm_pinmux_set_mux(struct pinctrl_dev *pctldev,
 
 	spin_lock_irqsave(&pctrl->lock, flags);
 
-	val = readl(pctrl->regs + base + g->ctl_reg);
+	val = readl(pctrl->regs + g->ctl_reg);
 	val &= ~mask;
 	val |= i << g->mux_bit;
-	writel(val, pctrl->regs + base + g->ctl_reg);
+	writel(val, pctrl->regs + g->ctl_reg);
 
 	spin_unlock_irqrestore(&pctrl->lock, flags);
 
@@ -255,16 +224,15 @@ static int msm_config_group_get(struct pinctrl_dev *pctldev,
 	unsigned arg;
 	unsigned bit;
 	int ret;
-	u32 val, base;
+	u32 val;
 
 	g = &pctrl->soc->groups[group];
-	base = msm_pinctrl_find_base(pctrl, group);
 
 	ret = msm_config_reg(pctrl, g, param, &mask, &bit);
 	if (ret < 0)
 		return ret;
 
-	val = readl(pctrl->regs + base + g->ctl_reg);
+	val = readl(pctrl->regs + g->ctl_reg);
 	arg = (val >> bit) & mask;
 
 	/* Convert register value to pinconf value */
@@ -289,7 +257,7 @@ static int msm_config_group_get(struct pinctrl_dev *pctldev,
 		if (!arg)
 			return -EINVAL;
 
-		val = readl(pctrl->regs + base + g->io_reg);
+		val = readl(pctrl->regs + g->io_reg);
 		arg = !!(val & BIT(g->in_bit));
 		break;
 	case PIN_CONFIG_INPUT_ENABLE:
@@ -320,12 +288,11 @@ static int msm_config_group_set(struct pinctrl_dev *pctldev,
 	unsigned arg;
 	unsigned bit;
 	int ret;
-	u32 val, base;
+	u32 val;
 	int i;
 
 	g = &pctrl->soc->groups[group];
 
-	base = msm_pinctrl_find_base(pctrl, group);
 	for (i = 0; i < num_configs; i++) {
 		param = pinconf_to_config_param(configs[i]);
 		arg = pinconf_to_config_argument(configs[i]);
@@ -358,12 +325,12 @@ static int msm_config_group_set(struct pinctrl_dev *pctldev,
 		case PIN_CONFIG_OUTPUT:
 			/* set output value */
 			spin_lock_irqsave(&pctrl->lock, flags);
-			val = readl(pctrl->regs + base + g->io_reg);
+			val = readl(pctrl->regs + g->io_reg);
 			if (arg)
 				val |= BIT(g->out_bit);
 			else
 				val &= ~BIT(g->out_bit);
-			writel(val, pctrl->regs + base + g->io_reg);
+			writel(val, pctrl->regs + g->io_reg);
 			spin_unlock_irqrestore(&pctrl->lock, flags);
 
 			/* enable output */
@@ -386,10 +353,10 @@ static int msm_config_group_set(struct pinctrl_dev *pctldev,
 		}
 
 		spin_lock_irqsave(&pctrl->lock, flags);
-		val = readl(pctrl->regs + base + g->ctl_reg);
+		val = readl(pctrl->regs + g->ctl_reg);
 		val &= ~(mask << bit);
 		val |= arg << bit;
-		writel(val, pctrl->regs + base + g->ctl_reg);
+		writel(val, pctrl->regs + g->ctl_reg);
 		spin_unlock_irqrestore(&pctrl->lock, flags);
 	}
 
@@ -414,16 +381,15 @@ static int msm_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
 	const struct msm_pingroup *g;
 	struct msm_pinctrl *pctrl = gpiochip_get_data(chip);
 	unsigned long flags;
-	u32 val, base;
+	u32 val;
 
 	g = &pctrl->soc->groups[offset];
-	base = msm_pinctrl_find_base(pctrl, offset);
 
 	spin_lock_irqsave(&pctrl->lock, flags);
 
-	val = readl(pctrl->regs + base + g->ctl_reg);
+	val = readl(pctrl->regs + g->ctl_reg);
 	val &= ~BIT(g->oe_bit);
-	writel(val, pctrl->regs + base + g->ctl_reg);
+	writel(val, pctrl->regs + g->ctl_reg);
 
 	spin_unlock_irqrestore(&pctrl->lock, flags);
 
@@ -435,23 +401,22 @@ static int msm_gpio_direction_output(struct gpio_chip *chip, unsigned offset, in
 	const struct msm_pingroup *g;
 	struct msm_pinctrl *pctrl = gpiochip_get_data(chip);
 	unsigned long flags;
-	u32 val, base;
+	u32 val;
 
 	g = &pctrl->soc->groups[offset];
-	base = msm_pinctrl_find_base(pctrl, offset);
 
 	spin_lock_irqsave(&pctrl->lock, flags);
 
-	val = readl(pctrl->regs + base + g->io_reg);
+	val = readl(pctrl->regs + g->io_reg);
 	if (value)
 		val |= BIT(g->out_bit);
 	else
 		val &= ~BIT(g->out_bit);
-	writel(val, pctrl->regs + base + g->io_reg);
+	writel(val, pctrl->regs + g->io_reg);
 
-	val = readl(pctrl->regs + base + g->ctl_reg);
+	val = readl(pctrl->regs + g->ctl_reg);
 	val |= BIT(g->oe_bit);
-	writel(val, pctrl->regs + base + g->ctl_reg);
+	writel(val, pctrl->regs + g->ctl_reg);
 
 	spin_unlock_irqrestore(&pctrl->lock, flags);
 
@@ -462,12 +427,11 @@ static int msm_gpio_get(struct gpio_chip *chip, unsigned offset)
 {
 	const struct msm_pingroup *g;
 	struct msm_pinctrl *pctrl = gpiochip_get_data(chip);
-	u32 val, base;
+	u32 val;
 
 	g = &pctrl->soc->groups[offset];
-	base = msm_pinctrl_find_base(pctrl, offset);
 
-	val = readl(pctrl->regs + base + g->io_reg);
+	val = readl(pctrl->regs + g->io_reg);
 	return !!(val & BIT(g->in_bit));
 }
 
@@ -476,19 +440,18 @@ static void msm_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
 	const struct msm_pingroup *g;
 	struct msm_pinctrl *pctrl = gpiochip_get_data(chip);
 	unsigned long flags;
-	u32 val, base;
+	u32 val;
 
 	g = &pctrl->soc->groups[offset];
-	base = msm_pinctrl_find_base(pctrl, offset);
 
 	spin_lock_irqsave(&pctrl->lock, flags);
 
-	val = readl(pctrl->regs + base + g->io_reg);
+	val = readl(pctrl->regs + g->io_reg);
 	if (value)
 		val |= BIT(g->out_bit);
 	else
 		val &= ~BIT(g->out_bit);
-	writel(val, pctrl->regs + base + g->io_reg);
+	writel(val, pctrl->regs + g->io_reg);
 
 	spin_unlock_irqrestore(&pctrl->lock, flags);
 }
@@ -508,7 +471,7 @@ static void msm_gpio_dbg_show_one(struct seq_file *s,
 	int is_out;
 	int drive;
 	int pull;
-	u32 ctl_reg, base;
+	u32 ctl_reg;
 
 	static const char * const pulls[] = {
 		"no pull",
@@ -518,9 +481,7 @@ static void msm_gpio_dbg_show_one(struct seq_file *s,
 	};
 
 	g = &pctrl->soc->groups[offset];
-	base = msm_pinctrl_find_base(pctrl, offset);
-
-	ctl_reg = readl(pctrl->regs + base + g->ctl_reg);
+	ctl_reg = readl(pctrl->regs + g->ctl_reg);
 
 	is_out = !!(ctl_reg & BIT(g->oe_bit));
 	func = (ctl_reg >> g->mux_bit) & 7;
@@ -579,21 +540,21 @@ static struct gpio_chip msm_gpio_template = {
  */
 static void msm_gpio_update_dual_edge_pos(struct msm_pinctrl *pctrl,
 					  const struct msm_pingroup *g,
-					  struct irq_data *d, u32 base)
+					  struct irq_data *d)
 {
 	int loop_limit = 100;
 	unsigned val, val2, intstat;
 	unsigned pol;
 
 	do {
-		val = readl(pctrl->regs + base + g->io_reg) & BIT(g->in_bit);
+		val = readl(pctrl->regs + g->io_reg) & BIT(g->in_bit);
 
-		pol = readl(pctrl->regs + base + g->intr_cfg_reg);
+		pol = readl(pctrl->regs + g->intr_cfg_reg);
 		pol ^= BIT(g->intr_polarity_bit);
-		writel(pol, pctrl->regs + base + g->intr_cfg_reg);
+		writel(pol, pctrl->regs + g->intr_cfg_reg);
 
-		val2 = readl(pctrl->regs + base + g->io_reg) & BIT(g->in_bit);
-		intstat = readl(pctrl->regs + base + g->intr_status_reg);
+		val2 = readl(pctrl->regs + g->io_reg) & BIT(g->in_bit);
+		intstat = readl(pctrl->regs + g->intr_status_reg);
 		if (intstat || (val == val2))
 			return;
 	} while (loop_limit-- > 0);
@@ -607,16 +568,15 @@ static void msm_gpio_irq_mask(struct irq_data *d)
 	struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
 	const struct msm_pingroup *g;
 	unsigned long flags;
-	u32 val, base;
+	u32 val;
 
 	g = &pctrl->soc->groups[d->hwirq];
-	base = msm_pinctrl_find_base(pctrl, d->hwirq);
 
 	spin_lock_irqsave(&pctrl->lock, flags);
 
-	val = readl(pctrl->regs + base + g->intr_cfg_reg);
+	val = readl(pctrl->regs + g->intr_cfg_reg);
 	val &= ~BIT(g->intr_enable_bit);
-	writel(val, pctrl->regs + base + g->intr_cfg_reg);
+	writel(val, pctrl->regs + g->intr_cfg_reg);
 
 	clear_bit(d->hwirq, pctrl->enabled_irqs);
 
@@ -629,16 +589,15 @@ static void msm_gpio_irq_unmask(struct irq_data *d)
 	struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
 	const struct msm_pingroup *g;
 	unsigned long flags;
-	u32 val, base;
+	u32 val;
 
 	g = &pctrl->soc->groups[d->hwirq];
-	base = msm_pinctrl_find_base(pctrl, d->hwirq);
 
 	spin_lock_irqsave(&pctrl->lock, flags);
 
-	val = readl(pctrl->regs + base + g->intr_cfg_reg);
+	val = readl(pctrl->regs + g->intr_cfg_reg);
 	val |= BIT(g->intr_enable_bit);
-	writel(val, pctrl->regs + base + g->intr_cfg_reg);
+	writel(val, pctrl->regs + g->intr_cfg_reg);
 
 	set_bit(d->hwirq, pctrl->enabled_irqs);
 
@@ -651,22 +610,21 @@ static void msm_gpio_irq_ack(struct irq_data *d)
 	struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
 	const struct msm_pingroup *g;
 	unsigned long flags;
-	u32 val, base;
+	u32 val;
 
 	g = &pctrl->soc->groups[d->hwirq];
-	base = msm_pinctrl_find_base(pctrl, d->hwirq);
 
 	spin_lock_irqsave(&pctrl->lock, flags);
 
-	val = readl(pctrl->regs + base + g->intr_status_reg);
+	val = readl(pctrl->regs + g->intr_status_reg);
 	if (g->intr_ack_high)
 		val |= BIT(g->intr_status_bit);
 	else
 		val &= ~BIT(g->intr_status_bit);
-	writel(val, pctrl->regs + base + g->intr_status_reg);
+	writel(val, pctrl->regs + g->intr_status_reg);
 
 	if (test_bit(d->hwirq, pctrl->dual_edge_irqs))
-		msm_gpio_update_dual_edge_pos(pctrl, g, d, base);
+		msm_gpio_update_dual_edge_pos(pctrl, g, d);
 
 	spin_unlock_irqrestore(&pctrl->lock, flags);
 }
@@ -677,10 +635,10 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type)
 	struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
 	const struct msm_pingroup *g;
 	unsigned long flags;
-	u32 val, base;
+	u32 val;
 
 	g = &pctrl->soc->groups[d->hwirq];
-	base = msm_pinctrl_find_base(pctrl, d->hwirq);
+
 	spin_lock_irqsave(&pctrl->lock, flags);
 
 	/*
@@ -692,17 +650,17 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type)
 		clear_bit(d->hwirq, pctrl->dual_edge_irqs);
 
 	/* Route interrupts to application cpu */
-	val = readl(pctrl->regs + base + g->intr_target_reg);
+	val = readl(pctrl->regs + g->intr_target_reg);
 	val &= ~(7 << g->intr_target_bit);
 	val |= g->intr_target_kpss_val << g->intr_target_bit;
-	writel(val, pctrl->regs + base + g->intr_target_reg);
+	writel(val, pctrl->regs + g->intr_target_reg);
 
 	/* Update configuration for gpio.
 	 * RAW_STATUS_EN is left on for all gpio irqs. Due to the
 	 * internal circuitry of TLMM, toggling the RAW_STATUS
 	 * could cause the INTR_STATUS to be set for EDGE interrupts.
 	 */
-	val = readl(pctrl->regs + base + g->intr_cfg_reg);
+	val = readl(pctrl->regs + g->intr_cfg_reg);
 	val |= BIT(g->intr_raw_status_bit);
 	if (g->intr_detection_width == 2) {
 		val &= ~(3 << g->intr_detection_bit);
@@ -750,10 +708,10 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type)
 	} else {
 		BUG();
 	}
-	writel(val, pctrl->regs + base + g->intr_cfg_reg);
+	writel(val, pctrl->regs + g->intr_cfg_reg);
 
 	if (test_bit(d->hwirq, pctrl->dual_edge_irqs))
-		msm_gpio_update_dual_edge_pos(pctrl, g, d, base);
+		msm_gpio_update_dual_edge_pos(pctrl, g, d);
 
 	spin_unlock_irqrestore(&pctrl->lock, flags);
 
@@ -882,7 +840,7 @@ static void msm_gpio_irq_handler(struct irq_desc *desc)
 	struct irq_chip *chip = irq_desc_get_chip(desc);
 	int irq_pin;
 	int handled = 0;
-	u32 val, base;
+	u32 val;
 	int i;
 
 	chained_irq_enter(chip, desc);
@@ -893,8 +851,7 @@ static void msm_gpio_irq_handler(struct irq_desc *desc)
 	 */
 	for_each_set_bit(i, pctrl->enabled_irqs, pctrl->chip.ngpio) {
 		g = &pctrl->soc->groups[i];
-		base = msm_pinctrl_find_base(pctrl, i);
-		val = readl(pctrl->regs + base + g->intr_status_reg);
+		val = readl(pctrl->regs + g->intr_status_reg);
 		if (val & BIT(g->intr_status_bit)) {
 			irq_pin = irq_find_mapping(gc->irqdomain, i);
 			generic_handle_irq(irq_pin);
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.h b/drivers/pinctrl/qcom/pinctrl-msm.h
index 7ffc2e3..988b7ca 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.h
+++ b/drivers/pinctrl/qcom/pinctrl-msm.h
@@ -129,10 +129,6 @@ struct msm_pinctrl_soc_data {
 	unsigned ngpios;
 	const struct msm_dir_conn *dir_conn;
 	unsigned int n_dir_conns;
-	const u32 *tile_offsets;
-	unsigned int n_tile_offsets;
-	u32 *pin_base;
-	unsigned int reg_size;
 };
 
 int msm_pinctrl_probe(struct platform_device *pdev,
diff --git a/drivers/pinctrl/qcom/pinctrl-sdm670.c b/drivers/pinctrl/qcom/pinctrl-sdm670.c
index b454cc442..8749653 100644
--- a/drivers/pinctrl/qcom/pinctrl-sdm670.c
+++ b/drivers/pinctrl/qcom/pinctrl-sdm670.c
@@ -25,8 +25,13 @@
 		.ngroups = ARRAY_SIZE(fname##_groups),	\
 	}
 
+#define NORTH	0x00500000
+#define SOUTH	0x00900000
+#define WEST	0x00100000
+
+#define DUMMY	0x0
 #define REG_SIZE 0x1000
-#define PINGROUP(id, f1, f2, f3, f4, f5, f6, f7, f8, f9)	\
+#define PINGROUP(id, base, f1, f2, f3, f4, f5, f6, f7, f8, f9)	\
 	{						\
 		.name = "gpio" #id,			\
 		.pins = gpio##id##_pins,		\
@@ -44,11 +49,11 @@
 			msm_mux_##f9			\
 		},					\
 		.nfuncs = 10,				\
-		.ctl_reg = REG_SIZE * id,		\
-		.io_reg = 0x4 + REG_SIZE * id,		\
-		.intr_cfg_reg = 0x8 + REG_SIZE * id,	\
-		.intr_status_reg = 0xc + REG_SIZE * id,	\
-		.intr_target_reg = 0x8 + REG_SIZE * id,	\
+		.ctl_reg = base + REG_SIZE * id,		\
+		.io_reg = base + 0x4 + REG_SIZE * id,		\
+		.intr_cfg_reg = base + 0x8 + REG_SIZE * id,	\
+		.intr_status_reg = base + 0xc + REG_SIZE * id,	\
+		.intr_target_reg = base + 0x8 + REG_SIZE * id,	\
 		.mux_bit = 2,			\
 		.pull_bit = 0,			\
 		.drv_bit = 6,			\
@@ -114,10 +119,6 @@
 		.intr_detection_bit = -1,		\
 		.intr_detection_width = -1,		\
 	}
-
-static const u32 sdm670_tile_offsets[] = {0x100000, 0x500000, 0x900000};
-static u32 sdm670_pin_base[150];
-
 static const struct pinctrl_pin_desc sdm670_pins[] = {
 	PINCTRL_PIN(0, "GPIO_0"),
 	PINCTRL_PIN(1, "GPIO_1"),
@@ -1332,259 +1333,258 @@ static const struct msm_function sdm670_functions[] = {
  * Clients would not be able to request these dummy pin groups.
  */
 static const struct msm_pingroup sdm670_groups[] = {
-	[0] = PINGROUP(0, qup0, NA, NA, NA, NA, NA, NA, NA, NA),
-	[1] = PINGROUP(1, qup0, NA, NA, NA, NA, NA, NA, NA, NA),
-	[2] = PINGROUP(2, qup0, NA, NA, NA, NA, NA, NA, NA, NA),
-	[3] = PINGROUP(3, qup0, NA, NA, NA, NA, NA, NA, NA, NA),
-	[4] = PINGROUP(4, qup9, qdss_cti, NA, NA, NA, NA, NA, NA, NA),
-	[5] = PINGROUP(5, qup9, qdss_cti, NA, NA, NA, NA, NA, NA, NA),
-	[6] = PINGROUP(6, qup9, NA, ddr_pxi0, NA, NA, NA, NA, NA, NA),
-	[7] = PINGROUP(7, qup9, ddr_bist, NA, atest_tsens2,
+	[0] = PINGROUP(0, SOUTH, qup0, NA, NA, NA, NA, NA, NA, NA, NA),
+	[1] = PINGROUP(1, SOUTH, qup0, NA, NA, NA, NA, NA, NA, NA, NA),
+	[2] = PINGROUP(2, SOUTH, qup0, NA, NA, NA, NA, NA, NA, NA, NA),
+	[3] = PINGROUP(3, SOUTH, qup0, NA, NA, NA, NA, NA, NA, NA, NA),
+	[4] = PINGROUP(4, NORTH, qup9, qdss_cti, NA, NA, NA, NA, NA, NA, NA),
+	[5] = PINGROUP(5, NORTH, qup9, qdss_cti, NA, NA, NA, NA, NA, NA, NA),
+	[6] = PINGROUP(6, NORTH, qup9, NA, ddr_pxi0, NA, NA, NA, NA, NA, NA),
+	[7] = PINGROUP(7, NORTH, qup9, ddr_bist, NA, atest_tsens2,
 		       vsense_trigger, atest_usb1, ddr_pxi0, NA, NA),
-	[8] = PINGROUP(8, qup_l4, GP_PDM1, ddr_bist, NA, NA, NA, NA, NA,
+	[8] = PINGROUP(8, WEST, qup_l4, GP_PDM1, ddr_bist, NA, NA, NA, NA, NA,
 		       NA),
-	[9] = PINGROUP(9, qup_l5, ddr_bist, NA, NA, NA, NA, NA, NA, NA),
-	[10] = PINGROUP(10, mdp_vsync, qup_l6, ddr_bist, wlan2_adc1,
+	[9] = PINGROUP(9, WEST, qup_l5, ddr_bist, NA, NA, NA, NA, NA, NA, NA),
+	[10] = PINGROUP(10, NORTH, mdp_vsync, qup_l6, ddr_bist, wlan2_adc1,
 			atest_usb11, ddr_pxi2, NA, NA, NA),
-	[11] = PINGROUP(11, mdp_vsync, edp_lcd, dbg_out, wlan2_adc0,
+	[11] = PINGROUP(11, NORTH, mdp_vsync, edp_lcd, dbg_out, wlan2_adc0,
 			atest_usb10, ddr_pxi2, NA, NA, NA),
-	[12] = PINGROUP(12, mdp_vsync, m_voc, tsif1_sync, ddr_pxi3, NA,
+	[12] = PINGROUP(12, SOUTH, mdp_vsync, m_voc, tsif1_sync, ddr_pxi3, NA,
 			NA, NA, NA, NA),
-	[13] = PINGROUP(13, cam_mclk, pll_bypassnl, qdss_gpio0, ddr_pxi3,
+	[13] = PINGROUP(13, WEST, cam_mclk, pll_bypassnl, qdss_gpio0, ddr_pxi3,
 			NA, NA, NA, NA, NA),
-	[14] = PINGROUP(14, cam_mclk, pll_reset, qdss_gpio1, NA, NA, NA,
+	[14] = PINGROUP(14, WEST, cam_mclk, pll_reset, qdss_gpio1, NA, NA, NA,
 			NA, NA, NA),
-	[15] = PINGROUP(15, cam_mclk, qdss_gpio2, NA, NA, NA, NA, NA, NA,
+	[15] = PINGROUP(15, WEST, cam_mclk, qdss_gpio2, NA, NA, NA, NA, NA, NA,
 			NA),
-	[16] = PINGROUP(16, cam_mclk, qdss_gpio3, NA, NA, NA, NA, NA, NA,
+	[16] = PINGROUP(16, WEST, cam_mclk, qdss_gpio3, NA, NA, NA, NA, NA, NA,
 			NA),
-	[17] = PINGROUP(17, cci_i2c, qup1, qdss_gpio4, NA, NA, NA, NA,
+	[17] = PINGROUP(17, WEST, cci_i2c, qup1, qdss_gpio4, NA, NA, NA, NA,
 			NA, NA),
-	[18] = PINGROUP(18, cci_i2c, qup1, NA, qdss_gpio5, NA, NA, NA,
+	[18] = PINGROUP(18, WEST, cci_i2c, qup1, NA, qdss_gpio5, NA, NA, NA,
 			NA, NA),
-	[19] = PINGROUP(19, cci_i2c, qup1, NA, qdss_gpio6, NA, NA, NA,
+	[19] = PINGROUP(19, WEST, cci_i2c, qup1, NA, qdss_gpio6, NA, NA, NA,
 			NA, NA),
-	[20] = PINGROUP(20, cci_i2c, qup1, NA, qdss_gpio7, NA, NA, NA,
+	[20] = PINGROUP(20, WEST, cci_i2c, qup1, NA, qdss_gpio7, NA, NA, NA,
 			NA, NA),
-	[21] = PINGROUP(21, cci_timer0, gcc_gp2, qdss_gpio8, NA, NA, NA,
+	[21] = PINGROUP(21, WEST, cci_timer0, gcc_gp2, qdss_gpio8, NA, NA, NA,
 			NA, NA, NA),
-	[22] = PINGROUP(22, cci_timer1, gcc_gp3, qdss_gpio, NA, NA, NA,
+	[22] = PINGROUP(22, WEST, cci_timer1, gcc_gp3, qdss_gpio, NA, NA, NA,
 			NA, NA, NA),
-	[23] = PINGROUP(23, cci_timer2, qdss_gpio9, NA, NA, NA, NA, NA,
+	[23] = PINGROUP(23, WEST, cci_timer2, qdss_gpio9, NA, NA, NA, NA, NA,
 			NA, NA),
-	[24] = PINGROUP(24, cci_timer3, cci_async, qdss_gpio10, NA, NA,
+	[24] = PINGROUP(24, WEST, cci_timer3, cci_async, qdss_gpio10, NA, NA,
 			NA, NA, NA, NA),
-	[25] = PINGROUP(25, cci_timer4, cci_async, qdss_gpio11, NA, NA,
+	[25] = PINGROUP(25, WEST, cci_timer4, cci_async, qdss_gpio11, NA, NA,
 			NA, NA, NA, NA),
-	[26] = PINGROUP(26, cci_async, qdss_gpio12, JITTER_BIST, NA, NA,
+	[26] = PINGROUP(26, WEST, cci_async, qdss_gpio12, JITTER_BIST, NA, NA,
 			NA, NA, NA, NA),
-	[27] = PINGROUP(27, qup2, qdss_gpio13, PLL_BIST, NA, NA, NA, NA,
+	[27] = PINGROUP(27, WEST, qup2, qdss_gpio13, PLL_BIST, NA, NA, NA, NA,
 			NA, NA),
-	[28] = PINGROUP(28, qup2, qdss_gpio14, AGERA_PLL, NA, NA, NA, NA,
+	[28] = PINGROUP(28, WEST, qup2, qdss_gpio14, AGERA_PLL, NA, NA, NA, NA,
 			NA, NA),
-	[29] = PINGROUP(29, qup2, NA, phase_flag1, qdss_gpio15,
+	[29] = PINGROUP(29, WEST, qup2, NA, phase_flag1, qdss_gpio15,
 			atest_tsens, NA, NA, NA, NA),
-	[30] = PINGROUP(30, qup2, phase_flag2, qdss_gpio, NA, NA, NA, NA,
+	[30] = PINGROUP(30, WEST, qup2, phase_flag2, qdss_gpio, NA, NA, NA, NA,
 			NA, NA),
-	[31] = PINGROUP(31, qup11, qup14, NA, NA, NA, NA, NA, NA, NA),
-	[32] = PINGROUP(32, qup11, qup14, NA, NA, NA, NA, NA, NA, NA),
-	[33] = PINGROUP(33, qup11, qup14, NA, NA, NA, NA, NA, NA, NA),
-	[34] = PINGROUP(34, qup11, qup14, NA, NA, NA, NA, NA, NA, NA),
-	[35] = PINGROUP(35, pci_e0, QUP_L4, JITTER_BIST, NA, NA, NA, NA,
+	[31] = PINGROUP(31, WEST, qup11, qup14, NA, NA, NA, NA, NA, NA, NA),
+	[32] = PINGROUP(32, WEST, qup11, qup14, NA, NA, NA, NA, NA, NA, NA),
+	[33] = PINGROUP(33, WEST, qup11, qup14, NA, NA, NA, NA, NA, NA, NA),
+	[34] = PINGROUP(34, WEST, qup11, qup14, NA, NA, NA, NA, NA, NA, NA),
+	[35] = PINGROUP(35, NORTH, pci_e0, QUP_L4, JITTER_BIST, NA, NA, NA, NA,
 			NA, NA),
-	[36] = PINGROUP(36, pci_e0, QUP_L5, PLL_BIST, NA, NA, NA, NA,
+	[36] = PINGROUP(36, NORTH, pci_e0, QUP_L5, PLL_BIST, NA, NA, NA, NA,
 			NA, NA),
-	[37] = PINGROUP(37, QUP_L6, AGERA_PLL, NA, NA, NA, NA, NA, NA,
+	[37] = PINGROUP(37, NORTH, QUP_L6, AGERA_PLL, NA, NA, NA, NA, NA, NA,
 			NA),
-	[38] = PINGROUP(38, usb_phy, NA, NA, NA, NA, NA, NA, NA, NA),
-	[39] = PINGROUP(39, lpass_slimbus, NA, NA, NA, NA, NA, NA, NA,
+	[38] = PINGROUP(38, NORTH, usb_phy, NA, NA, NA, NA, NA, NA, NA, NA),
+	[39] = PINGROUP(39, NORTH, lpass_slimbus, NA, NA, NA, NA, NA, NA, NA,
 			NA),
-	[40] = PINGROUP(40, sd_write, tsif1_error, NA, NA, NA, NA, NA,
+	[40] = PINGROUP(40, NORTH, sd_write, tsif1_error, NA, NA, NA, NA, NA,
 			NA, NA),
-	[41] = PINGROUP(41, qup3, NA, qdss_gpio6, NA, NA, NA, NA, NA,
+	[41] = PINGROUP(41, SOUTH, qup3, NA, qdss_gpio6, NA, NA, NA, NA, NA,
 			NA),
-	[42] = PINGROUP(42, qup3, NA, qdss_gpio7, NA, NA, NA, NA, NA,
+	[42] = PINGROUP(42, SOUTH, qup3, NA, qdss_gpio7, NA, NA, NA, NA, NA,
 			NA),
-	[43] = PINGROUP(43, qup3, NA, qdss_gpio14, NA, NA, NA, NA, NA,
+	[43] = PINGROUP(43, SOUTH, qup3, NA, qdss_gpio14, NA, NA, NA, NA, NA,
 			NA),
-	[44] = PINGROUP(44, qup3, NA, qdss_gpio15, NA, NA, NA, NA, NA,
+	[44] = PINGROUP(44, SOUTH, qup3, NA, qdss_gpio15, NA, NA, NA, NA, NA,
 			NA),
-	[45] = PINGROUP(45, qup6, NA, NA, NA, NA, NA, NA, NA, NA),
-	[46] = PINGROUP(46, qup6, NA, NA, NA, NA, NA, NA, NA, NA),
-	[47] = PINGROUP(47, qup6, NA, NA, NA, NA, NA, NA, NA, NA),
-	[48] = PINGROUP(48, qup6, NA, NA, NA, NA, NA, NA, NA, NA),
-	[49] = PINGROUP(49, qup12, NA, NA, NA, NA, NA, NA, NA, NA),
-	[50] = PINGROUP(50, qup12, NA, NA, NA, NA, NA, NA, NA, NA),
-	[51] = PINGROUP(51, qup12, qdss_cti, NA, NA, NA, NA, NA, NA, NA),
-	[52] = PINGROUP(52, qup12, phase_flag16, qdss_cti, NA, NA, NA,
+	[45] = PINGROUP(45, SOUTH, qup6, NA, NA, NA, NA, NA, NA, NA, NA),
+	[46] = PINGROUP(46, SOUTH, qup6, NA, NA, NA, NA, NA, NA, NA, NA),
+	[47] = PINGROUP(47, SOUTH, qup6, NA, NA, NA, NA, NA, NA, NA, NA),
+	[48] = PINGROUP(48, SOUTH, qup6, NA, NA, NA, NA, NA, NA, NA, NA),
+	[49] = PINGROUP(49, NORTH, qup12, NA, NA, NA, NA, NA, NA, NA, NA),
+	[50] = PINGROUP(50, NORTH, qup12, NA, NA, NA, NA, NA, NA, NA, NA),
+	[51] = PINGROUP(51, NORTH, qup12, qdss_cti, NA, NA, NA, NA, NA, NA, NA),
+	[52] = PINGROUP(52, NORTH, qup12, phase_flag16, qdss_cti, NA, NA, NA,
 			NA, NA, NA),
-	[53] = PINGROUP(53, qup10, phase_flag11, NA, NA, NA, NA, NA, NA,
+	[53] = PINGROUP(53, NORTH, qup10, phase_flag11, NA, NA, NA, NA, NA, NA,
 			NA),
-	[54] = PINGROUP(54, qup10, GP_PDM0, phase_flag12, NA,
+	[54] = PINGROUP(54, NORTH, qup10, GP_PDM0, phase_flag12, NA,
 			wlan1_adc1, atest_usb13, ddr_pxi1, NA, NA),
-	[55] = PINGROUP(55, qup10, phase_flag13, NA, wlan1_adc0,
+	[55] = PINGROUP(55, NORTH, qup10, phase_flag13, NA, wlan1_adc0,
 			atest_usb12, ddr_pxi1, NA, NA, NA),
-	[56] = PINGROUP(56, qup10, phase_flag17, NA, NA, NA, NA, NA, NA,
+	[56] = PINGROUP(56, NORTH, qup10, phase_flag17, NA, NA, NA, NA, NA, NA,
 			NA),
-	[57] = PINGROUP(57, qua_mi2s, gcc_gp1, phase_flag18, NA, NA, NA,
+	[57] = PINGROUP(57, NORTH, qua_mi2s, gcc_gp1, phase_flag18, NA, NA, NA,
 			NA, NA, NA),
-	[58] = PINGROUP(58, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	[59] = PINGROUP(59, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	[60] = PINGROUP(60, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	[61] = PINGROUP(61, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	[62] = PINGROUP(62, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	[63] = PINGROUP(63, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	[64] = PINGROUP(64, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	[65] = PINGROUP(65, pri_mi2s, qup8, wsa_clk, NA, NA, NA, NA, NA,
+	[58] = PINGROUP(58, DUMMY, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[59] = PINGROUP(59, DUMMY, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[60] = PINGROUP(60, DUMMY, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[61] = PINGROUP(61, DUMMY, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[62] = PINGROUP(62, DUMMY, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[63] = PINGROUP(63, DUMMY, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[64] = PINGROUP(64, DUMMY, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[65] = PINGROUP(65, NORTH, pri_mi2s, qup8, wsa_clk, NA, NA, NA, NA, NA,
 			NA),
-	[66] = PINGROUP(66, pri_mi2s_ws, qup8, wsa_data, GP_PDM1, NA,
+	[66] = PINGROUP(66, NORTH, pri_mi2s_ws, qup8, wsa_data, GP_PDM1, NA,
 			NA, NA, NA, NA),
-	[67] = PINGROUP(67, pri_mi2s, qup8, NA, atest_usb2, NA, NA, NA,
+	[67] = PINGROUP(67, NORTH, pri_mi2s, qup8, NA, atest_usb2, NA, NA, NA,
 			NA, NA),
-	[68] = PINGROUP(68, pri_mi2s, qup8, NA, atest_usb23, NA, NA, NA,
+	[68] = PINGROUP(68, NORTH, pri_mi2s, qup8, NA, atest_usb23, NA, NA, NA,
 			NA, NA),
-	[69] = PINGROUP(69, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	[70] = PINGROUP(70, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	[71] = PINGROUP(71, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	[72] = PINGROUP(72, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	[73] = PINGROUP(73, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	[74] = PINGROUP(74, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	[75] = PINGROUP(75, ter_mi2s, phase_flag8, qdss_gpio8,
+	[69] = PINGROUP(69, DUMMY, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[70] = PINGROUP(70, DUMMY, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[71] = PINGROUP(71, DUMMY, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[72] = PINGROUP(72, DUMMY, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[73] = PINGROUP(73, DUMMY, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[74] = PINGROUP(74, DUMMY, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[75] = PINGROUP(75, NORTH, ter_mi2s, phase_flag8, qdss_gpio8,
 			atest_usb22, QUP_L4, NA, NA, NA, NA),
-	[76] = PINGROUP(76, ter_mi2s, phase_flag9, qdss_gpio9,
+	[76] = PINGROUP(76, NORTH, ter_mi2s, phase_flag9, qdss_gpio9,
 			atest_usb21, QUP_L5, NA, NA, NA, NA),
-	[77] = PINGROUP(77, ter_mi2s, phase_flag4, qdss_gpio10,
+	[77] = PINGROUP(77, NORTH, ter_mi2s, phase_flag4, qdss_gpio10,
 			atest_usb20, QUP_L6, NA, NA, NA, NA),
-	[78] = PINGROUP(78, ter_mi2s, gcc_gp1, NA, NA, NA, NA, NA, NA,
+	[78] = PINGROUP(78, NORTH, ter_mi2s, gcc_gp1, NA, NA, NA, NA, NA, NA,
 			NA),
-	[79] = PINGROUP(79, sec_mi2s, GP_PDM2, NA, qdss_gpio11, NA, NA,
+	[79] = PINGROUP(79, NORTH, sec_mi2s, GP_PDM2, NA, qdss_gpio11, NA, NA,
 			NA, NA, NA),
-	[80] = PINGROUP(80, sec_mi2s, NA, qdss_gpio12, NA, NA, NA, NA,
+	[80] = PINGROUP(80, NORTH, sec_mi2s, NA, qdss_gpio12, NA, NA, NA, NA,
 			NA, NA),
-	[81] = PINGROUP(81, sec_mi2s, qup15, NA, NA, NA, NA, NA, NA, NA),
-	[82] = PINGROUP(82, sec_mi2s, qup15, NA, NA, NA, NA, NA, NA, NA),
-	[83] = PINGROUP(83, sec_mi2s, qup15, NA, NA, NA, NA, NA, NA, NA),
-	[84] = PINGROUP(84, qup15, NA, NA, NA, NA, NA, NA, NA, NA),
-	[85] = PINGROUP(85, qup5, NA, NA, NA, NA, NA, NA, NA, NA),
-	[86] = PINGROUP(86, qup5, copy_gp, NA, NA, NA, NA, NA, NA, NA),
-	[87] = PINGROUP(87, qup5, NA, NA, NA, NA, NA, NA, NA, NA),
-	[88] = PINGROUP(88, qup5, NA, NA, NA, NA, NA, NA, NA, NA),
-	[89] = PINGROUP(89, tsif1_clk, qup4, tgu_ch3, phase_flag10, NA,
+	[81] = PINGROUP(81, NORTH, sec_mi2s, qup15, NA, NA, NA, NA, NA, NA, NA),
+	[82] = PINGROUP(82, NORTH, sec_mi2s, qup15, NA, NA, NA, NA, NA, NA, NA),
+	[83] = PINGROUP(83, NORTH, sec_mi2s, qup15, NA, NA, NA, NA, NA, NA, NA),
+	[84] = PINGROUP(84, NORTH, qup15, NA, NA, NA, NA, NA, NA, NA, NA),
+	[85] = PINGROUP(85, SOUTH, qup5, NA, NA, NA, NA, NA, NA, NA, NA),
+	[86] = PINGROUP(86, SOUTH, qup5, copy_gp, NA, NA, NA, NA, NA, NA, NA),
+	[87] = PINGROUP(87, SOUTH, qup5, NA, NA, NA, NA, NA, NA, NA, NA),
+	[88] = PINGROUP(88, SOUTH, qup5, NA, NA, NA, NA, NA, NA, NA, NA),
+	[89] = PINGROUP(89, SOUTH, tsif1_clk, qup4, tgu_ch3, phase_flag10, NA,
 			NA, NA, NA, NA),
-	[90] = PINGROUP(90, tsif1_en, mdp_vsync0, qup4, mdp_vsync1,
+	[90] = PINGROUP(90, SOUTH, tsif1_en, mdp_vsync0, qup4, mdp_vsync1,
 			mdp_vsync2, mdp_vsync3, tgu_ch0, phase_flag0, qdss_cti),
-	[91] = PINGROUP(91, tsif1_data, sdc4_cmd, qup4, tgu_ch1, NA,
+	[91] = PINGROUP(91, SOUTH, tsif1_data, sdc4_cmd, qup4, tgu_ch1, NA,
 			qdss_cti, NA, NA, NA),
-	[92] = PINGROUP(92, tsif2_error, sdc43, qup4, vfr_1, tgu_ch2,
+	[92] = PINGROUP(92, SOUTH, tsif2_error, sdc43, qup4, vfr_1, tgu_ch2,
 			NA, NA, NA, NA),
-	[93] = PINGROUP(93, tsif2_clk, sdc4_clk, qup7, NA, qdss_gpio13,
+	[93] = PINGROUP(93, SOUTH, tsif2_clk, sdc4_clk, qup7, NA, qdss_gpio13,
 			NA, NA, NA, NA),
-	[94] = PINGROUP(94, tsif2_en, sdc42, qup7, NA, NA, NA, NA, NA,
+	[94] = PINGROUP(94, SOUTH, tsif2_en, sdc42, qup7, NA, NA, NA, NA, NA,
 			NA),
-	[95] = PINGROUP(95, tsif2_data, sdc41, qup7, GP_PDM0, NA, NA,
+	[95] = PINGROUP(95, SOUTH, tsif2_data, sdc41, qup7, GP_PDM0, NA, NA,
 			NA, NA, NA),
-	[96] = PINGROUP(96, tsif2_sync, sdc40, qup7, phase_flag3, NA,
+	[96] = PINGROUP(96, SOUTH, tsif2_sync, sdc40, qup7, phase_flag3, NA,
 			NA, NA, NA, NA),
-	[97] = PINGROUP(97, NA, NA, mdp_vsync, ldo_en, NA, NA, NA, NA,
+	[97] = PINGROUP(97, WEST, NA, NA, mdp_vsync, ldo_en, NA, NA, NA, NA,
 			NA),
-	[98] = PINGROUP(98, NA, mdp_vsync, ldo_update, NA, NA, NA, NA,
+	[98] = PINGROUP(98, WEST, NA, mdp_vsync, ldo_update, NA, NA, NA, NA,
 			NA, NA),
-	[99] = PINGROUP(99, phase_flag14, prng_rosc, NA, NA, NA, NA, NA,
+	[99] = PINGROUP(99, NORTH, phase_flag14, prng_rosc, NA, NA, NA, NA, NA,
 			NA, NA),
-	[100] = PINGROUP(100, phase_flag15, NA, NA, NA, NA, NA, NA, NA,
+	[100] = PINGROUP(100, WEST, phase_flag15, NA, NA, NA, NA, NA, NA, NA,
 			 NA),
-	[101] = PINGROUP(101, NA, phase_flag5, NA, NA, NA, NA, NA, NA,
+	[101] = PINGROUP(101, WEST, NA, phase_flag5, NA, NA, NA, NA, NA, NA,
 			 NA),
-	[102] = PINGROUP(102, pci_e1, prng_rosc, NA, NA, NA, NA, NA, NA,
+	[102] = PINGROUP(102, WEST, pci_e1, prng_rosc, NA, NA, NA, NA, NA, NA,
 			 NA),
-	[103] = PINGROUP(103, pci_e1, COPY_PHASE, NA, NA, NA, NA, NA, NA,
+	[103] = PINGROUP(103, WEST, pci_e1, COPY_PHASE, NA, NA, NA, NA, NA, NA,
 			 NA),
-	[104] = PINGROUP(104, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	[105] = PINGROUP(105, uim2_data, qup13, qup_l4, NA, NA, NA, NA,
+	[104] = PINGROUP(104, DUMMY, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[105] = PINGROUP(105, NORTH, uim2_data, qup13, qup_l4, NA, NA, NA, NA,
 			 NA, NA),
-	[106] = PINGROUP(106, uim2_clk, qup13, qup_l5, NA, NA, NA, NA,
+	[106] = PINGROUP(106, NORTH, uim2_clk, qup13, qup_l5, NA, NA, NA, NA,
 			 NA, NA),
-	[107] = PINGROUP(107, uim2_reset, qup13, qup_l6, NA, NA, NA, NA,
+	[107] = PINGROUP(107, NORTH, uim2_reset, qup13, qup_l6, NA, NA, NA, NA,
 			 NA, NA),
-	[108] = PINGROUP(108, uim2_present, qup13, NA, NA, NA, NA, NA,
+	[108] = PINGROUP(108, NORTH, uim2_present, qup13, NA, NA, NA, NA, NA,
 			 NA, NA),
-	[109] = PINGROUP(109, uim1_data, NA, NA, NA, NA, NA, NA, NA, NA),
-	[110] = PINGROUP(110, uim1_clk, NA, NA, NA, NA, NA, NA, NA, NA),
-	[111] = PINGROUP(111, uim1_reset, NA, NA, NA, NA, NA, NA, NA,
+	[109] = PINGROUP(109, NORTH, uim1_data, NA, NA, NA, NA, NA, NA, NA, NA),
+	[110] = PINGROUP(110, NORTH, uim1_clk, NA, NA, NA, NA, NA, NA, NA, NA),
+	[111] = PINGROUP(111, NORTH, uim1_reset, NA, NA, NA, NA, NA, NA, NA,
 			 NA),
-	[112] = PINGROUP(112, uim1_present, NA, NA, NA, NA, NA, NA, NA,
+	[112] = PINGROUP(112, NORTH, uim1_present, NA, NA, NA, NA, NA, NA, NA,
 			 NA),
-	[113] = PINGROUP(113, uim_batt, edp_hot, NA, NA, NA, NA, NA, NA,
+	[113] = PINGROUP(113, NORTH, uim_batt, edp_hot, NA, NA, NA, NA, NA, NA,
 			 NA),
-	[114] = PINGROUP(114, NA, NAV_PPS, NAV_PPS, GPS_TX, NA, NA, NA,
+	[114] = PINGROUP(114, WEST, NA, NAV_PPS, NAV_PPS, GPS_TX, NA, NA, NA,
 			 NA, NA),
-	[115] = PINGROUP(115, NA, NAV_PPS, NAV_PPS, GPS_TX, NA, NA, NA,
+	[115] = PINGROUP(115, WEST, NA, NAV_PPS, NAV_PPS, GPS_TX, NA, NA, NA,
 			 NA, NA),
-	[116] = PINGROUP(116, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	[117] = PINGROUP(117, NA, qdss_gpio0, atest_char, NA, NA, NA,
+	[116] = PINGROUP(116, SOUTH, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[117] = PINGROUP(117, NORTH, NA, qdss_gpio0, atest_char, NA, NA, NA,
 			 NA, NA, NA),
-	[118] = PINGROUP(118, adsp_ext, NA, qdss_gpio1, atest_char3, NA,
+	[118] = PINGROUP(118, NORTH, adsp_ext, NA, qdss_gpio1, atest_char3, NA,
 			 NA, NA, NA, NA),
-	[119] = PINGROUP(119, NA, qdss_gpio2, atest_char2, NA, NA, NA,
+	[119] = PINGROUP(119, NORTH, NA, qdss_gpio2, atest_char2, NA, NA, NA,
 			 NA, NA, NA),
-	[120] = PINGROUP(120, NA, qdss_gpio3, atest_char1, NA, NA, NA,
+	[120] = PINGROUP(120, NORTH, NA, qdss_gpio3, atest_char1, NA, NA, NA,
 			 NA, NA, NA),
-	[121] = PINGROUP(121, NA, qdss_gpio4, atest_char0, NA, NA, NA,
+	[121] = PINGROUP(121, NORTH, NA, qdss_gpio4, atest_char0, NA, NA, NA,
 			 NA, NA, NA),
-	[122] = PINGROUP(122, NA, qdss_gpio5, NA, NA, NA, NA, NA, NA,
+	[122] = PINGROUP(122, NORTH, NA, qdss_gpio5, NA, NA, NA, NA, NA, NA,
 			 NA),
-	[123] = PINGROUP(123, qup_l4, NA, qdss_gpio, NA, NA, NA, NA, NA,
+	[123] = PINGROUP(123, NORTH, qup_l4, NA, qdss_gpio, NA, NA, NA, NA, NA,
 			 NA),
-	[124] = PINGROUP(124, qup_l5, NA, qdss_gpio, NA, NA, NA, NA, NA,
+	[124] = PINGROUP(124, NORTH, qup_l5, NA, qdss_gpio, NA, NA, NA, NA, NA,
 			 NA),
-	[125] = PINGROUP(125, qup_l6, NA, NA, NA, NA, NA, NA, NA, NA),
-	[126] = PINGROUP(126, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	[127] = PINGROUP(127, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	[128] = PINGROUP(128, NAV_PPS, NAV_PPS, GPS_TX, NA, NA, NA, NA,
+	[125] = PINGROUP(125, NORTH, qup_l6, NA, NA, NA, NA, NA, NA, NA, NA),
+	[126] = PINGROUP(126, NORTH, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[127] = PINGROUP(127, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[128] = PINGROUP(128, WEST, NAV_PPS, NAV_PPS, GPS_TX, NA, NA, NA, NA,
 			 NA, NA),
-	[129] = PINGROUP(129, NAV_PPS, NAV_PPS, GPS_TX, NA, NA, NA, NA,
+	[129] = PINGROUP(129, WEST, NAV_PPS, NAV_PPS, GPS_TX, NA, NA, NA, NA,
 			 NA, NA),
-	[130] = PINGROUP(130, qlink_request, NA, NA, NA, NA, NA, NA, NA,
+	[130] = PINGROUP(130, WEST, qlink_request, NA, NA, NA, NA, NA, NA, NA,
 			 NA),
-	[131] = PINGROUP(131, qlink_enable, NA, NA, NA, NA, NA, NA, NA,
+	[131] = PINGROUP(131, WEST, qlink_enable, NA, NA, NA, NA, NA, NA, NA,
 			 NA),
-	[132] = PINGROUP(132, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	[133] = PINGROUP(133, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	[134] = PINGROUP(134, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	[135] = PINGROUP(135, NA, pa_indicator, NA, NA, NA, NA, NA, NA,
+	[132] = PINGROUP(132, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[133] = PINGROUP(133, NORTH, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[134] = PINGROUP(134, NORTH, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[135] = PINGROUP(135, WEST, NA, pa_indicator, NA, NA, NA, NA, NA, NA,
 			 NA),
-	[136] = PINGROUP(136, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	[137] = PINGROUP(137, NA, NA, phase_flag26, NA, NA, NA, NA, NA,
+	[136] = PINGROUP(136, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[137] = PINGROUP(137, WEST, NA, NA, phase_flag26, NA, NA, NA, NA, NA,
 			 NA),
-	[138] = PINGROUP(138, NA, NA, phase_flag27, NA, NA, NA, NA, NA,
+	[138] = PINGROUP(138, WEST, NA, NA, phase_flag27, NA, NA, NA, NA, NA,
 			 NA),
-	[139] = PINGROUP(139, NA, phase_flag28, NA, NA, NA, NA, NA, NA,
+	[139] = PINGROUP(139, WEST, NA, phase_flag28, NA, NA, NA, NA, NA, NA,
 			 NA),
-	[140] = PINGROUP(140, NA, NA, phase_flag6, NA, NA, NA, NA, NA,
+	[140] = PINGROUP(140, WEST, NA, NA, phase_flag6, NA, NA, NA, NA, NA,
 			 NA),
-	[141] = PINGROUP(141, NA, phase_flag29, NA, NA, NA, NA, NA, NA,
+	[141] = PINGROUP(141, WEST, NA, phase_flag29, NA, NA, NA, NA, NA, NA,
 			 NA),
-	[142] = PINGROUP(142, NA, phase_flag30, NA, NA, NA, NA, NA, NA,
+	[142] = PINGROUP(142, WEST, NA, phase_flag30, NA, NA, NA, NA, NA, NA,
 			 NA),
-	[143] = PINGROUP(143, NA, NAV_PPS, NAV_PPS, GPS_TX, phase_flag31,
+	[143] = PINGROUP(143, WEST, NA, NAV_PPS, NAV_PPS, GPS_TX, phase_flag31,
 			 NA, NA, NA, NA),
-	[144] = PINGROUP(144, mss_lte, NA, NA, NA, NA, NA, NA, NA, NA),
-	[145] = PINGROUP(145, mss_lte, GPS_TX, NA, NA, NA, NA, NA, NA,
+	[144] = PINGROUP(144, SOUTH, mss_lte, NA, NA, NA, NA, NA, NA, NA, NA),
+	[145] = PINGROUP(145, SOUTH, mss_lte, GPS_TX, NA, NA, NA, NA, NA, NA,
 			 NA),
-	[146] = PINGROUP(146, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	[147] = PINGROUP(147, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	[148] = PINGROUP(148, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	[149] = PINGROUP(149, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	[150] = SDC_QDSD_PINGROUP(sdc1_rclk, 0x599000, 15, 0),
-	[151] = SDC_QDSD_PINGROUP(sdc1_clk, 0x599000, 13, 6),
-	[152] = SDC_QDSD_PINGROUP(sdc1_cmd, 0x599000, 11, 3),
-	[153] = SDC_QDSD_PINGROUP(sdc1_data, 0x599000, 9, 0),
-	[154] = SDC_QDSD_PINGROUP(sdc2_clk, 0x99a000, 14, 6),
-	[155] = SDC_QDSD_PINGROUP(sdc2_cmd, 0x99a000, 11, 3),
-	[156] = SDC_QDSD_PINGROUP(sdc2_data, 0x99a000, 9, 0),
-	[157] = UFS_RESET(ufs_reset, 0x99f000),
+	[146] = PINGROUP(146, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[147] = PINGROUP(147, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[148] = PINGROUP(148, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[149] = PINGROUP(149, WEST, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[150] = SDC_QDSD_PINGROUP(sdc1_rclk, 0x99000, 15, 0),
+	[151] = SDC_QDSD_PINGROUP(sdc1_clk, 0x99000, 13, 6),
+	[152] = SDC_QDSD_PINGROUP(sdc1_cmd, 0x99000, 11, 3),
+	[153] = SDC_QDSD_PINGROUP(sdc1_data, 0x99000, 9, 0),
+	[154] = SDC_QDSD_PINGROUP(sdc2_clk, 0x9a000, 14, 6),
+	[155] = SDC_QDSD_PINGROUP(sdc2_cmd, 0x9a000, 11, 3),
+	[156] = SDC_QDSD_PINGROUP(sdc2_data, 0x9a000, 9, 0),
+	[157] = UFS_RESET(ufs_reset, 0x9f000),
 };
-
 static const struct msm_dir_conn sdm670_dir_conn[] = {
 	{1, 510},
 	{3, 511},
@@ -1663,10 +1663,6 @@ static const struct msm_pinctrl_soc_data sdm670_pinctrl = {
 	.ngpios = 150,
 	.dir_conn = sdm670_dir_conn,
 	.n_dir_conns = ARRAY_SIZE(sdm670_dir_conn),
-	.tile_offsets = sdm670_tile_offsets,
-	.n_tile_offsets = ARRAY_SIZE(sdm670_tile_offsets),
-	.pin_base = sdm670_pin_base,
-	.reg_size = REG_SIZE,
 };
 
 static int sdm670_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/qcom/pinctrl-sdm845-v2.c b/drivers/pinctrl/qcom/pinctrl-sdm845-v2.c
new file mode 100644
index 0000000..f7d551e
--- /dev/null
+++ b/drivers/pinctrl/qcom/pinctrl-sdm845-v2.c
@@ -0,0 +1,1805 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-msm.h"
+
+#define FUNCTION(fname)					\
+	[msm_mux_##fname] = {				\
+		.name = #fname,				\
+		.groups = fname##_groups,		\
+		.ngroups = ARRAY_SIZE(fname##_groups),	\
+	}
+
+#define NORTH	0x00500000
+#define SOUTH	0x00900000
+#define EAST	0x00100000
+#define REG_SIZE 0x1000
+#define PINGROUP(id, base, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10)	\
+	{						\
+		.name = "gpio" #id,			\
+		.pins = gpio##id##_pins,		\
+		.npins = (unsigned int)ARRAY_SIZE(gpio##id##_pins),	\
+		.funcs = (int[]){			\
+			msm_mux_gpio, /* gpio mode */	\
+			msm_mux_##f1,			\
+			msm_mux_##f2,			\
+			msm_mux_##f3,			\
+			msm_mux_##f4,			\
+			msm_mux_##f5,			\
+			msm_mux_##f6,			\
+			msm_mux_##f7,			\
+			msm_mux_##f8,			\
+			msm_mux_##f9,			\
+			msm_mux_##f10			\
+		},					\
+		.nfuncs = 11,				\
+		.ctl_reg = base + REG_SIZE * id,		\
+		.io_reg = base + 0x4 + REG_SIZE * id,		\
+		.intr_cfg_reg = base + 0x8 + REG_SIZE * id,	\
+		.intr_status_reg = base + 0xc + REG_SIZE * id,	\
+		.intr_target_reg = base + 0x8 + REG_SIZE * id,	\
+		.mux_bit = 2,			\
+		.pull_bit = 0,			\
+		.drv_bit = 6,			\
+		.oe_bit = 9,			\
+		.in_bit = 0,			\
+		.out_bit = 1,			\
+		.intr_enable_bit = 0,		\
+		.intr_status_bit = 0,		\
+		.intr_target_bit = 5,		\
+		.intr_target_kpss_val = 3,	\
+		.intr_raw_status_bit = 4,	\
+		.intr_polarity_bit = 1,		\
+		.intr_detection_bit = 2,	\
+		.intr_detection_width = 2,	\
+	}
+
+#define SDC_QDSD_PINGROUP(pg_name, ctl, pull, drv)	\
+	{						\
+		.name = #pg_name,			\
+		.pins = pg_name##_pins,			\
+		.npins = (unsigned int)ARRAY_SIZE(pg_name##_pins),	\
+		.ctl_reg = ctl,				\
+		.io_reg = 0,				\
+		.intr_cfg_reg = 0,			\
+		.intr_status_reg = 0,			\
+		.intr_target_reg = 0,			\
+		.mux_bit = -1,				\
+		.pull_bit = pull,			\
+		.drv_bit = drv,				\
+		.oe_bit = -1,				\
+		.in_bit = -1,				\
+		.out_bit = -1,				\
+		.intr_enable_bit = -1,			\
+		.intr_status_bit = -1,			\
+		.intr_target_bit = -1,			\
+		.intr_raw_status_bit = -1,		\
+		.intr_polarity_bit = -1,		\
+		.intr_detection_bit = -1,		\
+		.intr_detection_width = -1,		\
+	}
+
+#define UFS_RESET(pg_name, offset)				\
+	{						\
+		.name = #pg_name,			\
+		.pins = pg_name##_pins,			\
+		.npins = (unsigned int)ARRAY_SIZE(pg_name##_pins),	\
+		.ctl_reg = offset,			\
+		.io_reg = offset + 0x4,			\
+		.intr_cfg_reg = 0,			\
+		.intr_status_reg = 0,			\
+		.intr_target_reg = 0,			\
+		.mux_bit = -1,				\
+		.pull_bit = 3,				\
+		.drv_bit = 0,				\
+		.oe_bit = -1,				\
+		.in_bit = -1,				\
+		.out_bit = 0,				\
+		.intr_enable_bit = -1,			\
+		.intr_status_bit = -1,			\
+		.intr_target_bit = -1,			\
+		.intr_raw_status_bit = -1,		\
+		.intr_polarity_bit = -1,		\
+		.intr_detection_bit = -1,		\
+		.intr_detection_width = -1,		\
+	}
+static const struct pinctrl_pin_desc sdm845_pins[] = {
+	PINCTRL_PIN(0, "GPIO_0"),
+	PINCTRL_PIN(1, "GPIO_1"),
+	PINCTRL_PIN(2, "GPIO_2"),
+	PINCTRL_PIN(3, "GPIO_3"),
+	PINCTRL_PIN(4, "GPIO_4"),
+	PINCTRL_PIN(5, "GPIO_5"),
+	PINCTRL_PIN(6, "GPIO_6"),
+	PINCTRL_PIN(7, "GPIO_7"),
+	PINCTRL_PIN(8, "GPIO_8"),
+	PINCTRL_PIN(9, "GPIO_9"),
+	PINCTRL_PIN(10, "GPIO_10"),
+	PINCTRL_PIN(11, "GPIO_11"),
+	PINCTRL_PIN(12, "GPIO_12"),
+	PINCTRL_PIN(13, "GPIO_13"),
+	PINCTRL_PIN(14, "GPIO_14"),
+	PINCTRL_PIN(15, "GPIO_15"),
+	PINCTRL_PIN(16, "GPIO_16"),
+	PINCTRL_PIN(17, "GPIO_17"),
+	PINCTRL_PIN(18, "GPIO_18"),
+	PINCTRL_PIN(19, "GPIO_19"),
+	PINCTRL_PIN(20, "GPIO_20"),
+	PINCTRL_PIN(21, "GPIO_21"),
+	PINCTRL_PIN(22, "GPIO_22"),
+	PINCTRL_PIN(23, "GPIO_23"),
+	PINCTRL_PIN(24, "GPIO_24"),
+	PINCTRL_PIN(25, "GPIO_25"),
+	PINCTRL_PIN(26, "GPIO_26"),
+	PINCTRL_PIN(27, "GPIO_27"),
+	PINCTRL_PIN(28, "GPIO_28"),
+	PINCTRL_PIN(29, "GPIO_29"),
+	PINCTRL_PIN(30, "GPIO_30"),
+	PINCTRL_PIN(31, "GPIO_31"),
+	PINCTRL_PIN(32, "GPIO_32"),
+	PINCTRL_PIN(33, "GPIO_33"),
+	PINCTRL_PIN(34, "GPIO_34"),
+	PINCTRL_PIN(35, "GPIO_35"),
+	PINCTRL_PIN(36, "GPIO_36"),
+	PINCTRL_PIN(37, "GPIO_37"),
+	PINCTRL_PIN(38, "GPIO_38"),
+	PINCTRL_PIN(39, "GPIO_39"),
+	PINCTRL_PIN(40, "GPIO_40"),
+	PINCTRL_PIN(41, "GPIO_41"),
+	PINCTRL_PIN(42, "GPIO_42"),
+	PINCTRL_PIN(43, "GPIO_43"),
+	PINCTRL_PIN(44, "GPIO_44"),
+	PINCTRL_PIN(45, "GPIO_45"),
+	PINCTRL_PIN(46, "GPIO_46"),
+	PINCTRL_PIN(47, "GPIO_47"),
+	PINCTRL_PIN(48, "GPIO_48"),
+	PINCTRL_PIN(49, "GPIO_49"),
+	PINCTRL_PIN(50, "GPIO_50"),
+	PINCTRL_PIN(51, "GPIO_51"),
+	PINCTRL_PIN(52, "GPIO_52"),
+	PINCTRL_PIN(53, "GPIO_53"),
+	PINCTRL_PIN(54, "GPIO_54"),
+	PINCTRL_PIN(55, "GPIO_55"),
+	PINCTRL_PIN(56, "GPIO_56"),
+	PINCTRL_PIN(57, "GPIO_57"),
+	PINCTRL_PIN(58, "GPIO_58"),
+	PINCTRL_PIN(59, "GPIO_59"),
+	PINCTRL_PIN(60, "GPIO_60"),
+	PINCTRL_PIN(61, "GPIO_61"),
+	PINCTRL_PIN(62, "GPIO_62"),
+	PINCTRL_PIN(63, "GPIO_63"),
+	PINCTRL_PIN(64, "GPIO_64"),
+	PINCTRL_PIN(65, "GPIO_65"),
+	PINCTRL_PIN(66, "GPIO_66"),
+	PINCTRL_PIN(67, "GPIO_67"),
+	PINCTRL_PIN(68, "GPIO_68"),
+	PINCTRL_PIN(69, "GPIO_69"),
+	PINCTRL_PIN(70, "GPIO_70"),
+	PINCTRL_PIN(71, "GPIO_71"),
+	PINCTRL_PIN(72, "GPIO_72"),
+	PINCTRL_PIN(73, "GPIO_73"),
+	PINCTRL_PIN(74, "GPIO_74"),
+	PINCTRL_PIN(75, "GPIO_75"),
+	PINCTRL_PIN(76, "GPIO_76"),
+	PINCTRL_PIN(77, "GPIO_77"),
+	PINCTRL_PIN(78, "GPIO_78"),
+	PINCTRL_PIN(79, "GPIO_79"),
+	PINCTRL_PIN(80, "GPIO_80"),
+	PINCTRL_PIN(81, "GPIO_81"),
+	PINCTRL_PIN(82, "GPIO_82"),
+	PINCTRL_PIN(83, "GPIO_83"),
+	PINCTRL_PIN(84, "GPIO_84"),
+	PINCTRL_PIN(85, "GPIO_85"),
+	PINCTRL_PIN(86, "GPIO_86"),
+	PINCTRL_PIN(87, "GPIO_87"),
+	PINCTRL_PIN(88, "GPIO_88"),
+	PINCTRL_PIN(89, "GPIO_89"),
+	PINCTRL_PIN(90, "GPIO_90"),
+	PINCTRL_PIN(91, "GPIO_91"),
+	PINCTRL_PIN(92, "GPIO_92"),
+	PINCTRL_PIN(93, "GPIO_93"),
+	PINCTRL_PIN(94, "GPIO_94"),
+	PINCTRL_PIN(95, "GPIO_95"),
+	PINCTRL_PIN(96, "GPIO_96"),
+	PINCTRL_PIN(97, "GPIO_97"),
+	PINCTRL_PIN(98, "GPIO_98"),
+	PINCTRL_PIN(99, "GPIO_99"),
+	PINCTRL_PIN(100, "GPIO_100"),
+	PINCTRL_PIN(101, "GPIO_101"),
+	PINCTRL_PIN(102, "GPIO_102"),
+	PINCTRL_PIN(103, "GPIO_103"),
+	PINCTRL_PIN(104, "GPIO_104"),
+	PINCTRL_PIN(105, "GPIO_105"),
+	PINCTRL_PIN(106, "GPIO_106"),
+	PINCTRL_PIN(107, "GPIO_107"),
+	PINCTRL_PIN(108, "GPIO_108"),
+	PINCTRL_PIN(109, "GPIO_109"),
+	PINCTRL_PIN(110, "GPIO_110"),
+	PINCTRL_PIN(111, "GPIO_111"),
+	PINCTRL_PIN(112, "GPIO_112"),
+	PINCTRL_PIN(113, "GPIO_113"),
+	PINCTRL_PIN(114, "GPIO_114"),
+	PINCTRL_PIN(115, "GPIO_115"),
+	PINCTRL_PIN(116, "GPIO_116"),
+	PINCTRL_PIN(117, "GPIO_117"),
+	PINCTRL_PIN(118, "GPIO_118"),
+	PINCTRL_PIN(119, "GPIO_119"),
+	PINCTRL_PIN(120, "GPIO_120"),
+	PINCTRL_PIN(121, "GPIO_121"),
+	PINCTRL_PIN(122, "GPIO_122"),
+	PINCTRL_PIN(123, "GPIO_123"),
+	PINCTRL_PIN(124, "GPIO_124"),
+	PINCTRL_PIN(125, "GPIO_125"),
+	PINCTRL_PIN(126, "GPIO_126"),
+	PINCTRL_PIN(127, "GPIO_127"),
+	PINCTRL_PIN(128, "GPIO_128"),
+	PINCTRL_PIN(129, "GPIO_129"),
+	PINCTRL_PIN(130, "GPIO_130"),
+	PINCTRL_PIN(131, "GPIO_131"),
+	PINCTRL_PIN(132, "GPIO_132"),
+	PINCTRL_PIN(133, "GPIO_133"),
+	PINCTRL_PIN(134, "GPIO_134"),
+	PINCTRL_PIN(135, "GPIO_135"),
+	PINCTRL_PIN(136, "GPIO_136"),
+	PINCTRL_PIN(137, "GPIO_137"),
+	PINCTRL_PIN(138, "GPIO_138"),
+	PINCTRL_PIN(139, "GPIO_139"),
+	PINCTRL_PIN(140, "GPIO_140"),
+	PINCTRL_PIN(141, "GPIO_141"),
+	PINCTRL_PIN(142, "GPIO_142"),
+	PINCTRL_PIN(143, "GPIO_143"),
+	PINCTRL_PIN(144, "GPIO_144"),
+	PINCTRL_PIN(145, "GPIO_145"),
+	PINCTRL_PIN(146, "GPIO_146"),
+	PINCTRL_PIN(147, "GPIO_147"),
+	PINCTRL_PIN(148, "GPIO_148"),
+	PINCTRL_PIN(149, "GPIO_149"),
+	PINCTRL_PIN(150, "SDC2_CLK"),
+	PINCTRL_PIN(151, "SDC2_CMD"),
+	PINCTRL_PIN(152, "SDC2_DATA"),
+	PINCTRL_PIN(153, "UFS_RESET"),
+};
+
+#define DECLARE_MSM_GPIO_PINS(pin) /* one single-pin group per GPIO */ \
+	static const unsigned int gpio##pin##_pins[] = { pin }
+DECLARE_MSM_GPIO_PINS(0);
+DECLARE_MSM_GPIO_PINS(1);
+DECLARE_MSM_GPIO_PINS(2);
+DECLARE_MSM_GPIO_PINS(3);
+DECLARE_MSM_GPIO_PINS(4);
+DECLARE_MSM_GPIO_PINS(5);
+DECLARE_MSM_GPIO_PINS(6);
+DECLARE_MSM_GPIO_PINS(7);
+DECLARE_MSM_GPIO_PINS(8);
+DECLARE_MSM_GPIO_PINS(9);
+DECLARE_MSM_GPIO_PINS(10);
+DECLARE_MSM_GPIO_PINS(11);
+DECLARE_MSM_GPIO_PINS(12);
+DECLARE_MSM_GPIO_PINS(13);
+DECLARE_MSM_GPIO_PINS(14);
+DECLARE_MSM_GPIO_PINS(15);
+DECLARE_MSM_GPIO_PINS(16);
+DECLARE_MSM_GPIO_PINS(17);
+DECLARE_MSM_GPIO_PINS(18);
+DECLARE_MSM_GPIO_PINS(19);
+DECLARE_MSM_GPIO_PINS(20);
+DECLARE_MSM_GPIO_PINS(21);
+DECLARE_MSM_GPIO_PINS(22);
+DECLARE_MSM_GPIO_PINS(23);
+DECLARE_MSM_GPIO_PINS(24);
+DECLARE_MSM_GPIO_PINS(25);
+DECLARE_MSM_GPIO_PINS(26);
+DECLARE_MSM_GPIO_PINS(27);
+DECLARE_MSM_GPIO_PINS(28);
+DECLARE_MSM_GPIO_PINS(29);
+DECLARE_MSM_GPIO_PINS(30);
+DECLARE_MSM_GPIO_PINS(31);
+DECLARE_MSM_GPIO_PINS(32);
+DECLARE_MSM_GPIO_PINS(33);
+DECLARE_MSM_GPIO_PINS(34);
+DECLARE_MSM_GPIO_PINS(35);
+DECLARE_MSM_GPIO_PINS(36);
+DECLARE_MSM_GPIO_PINS(37);
+DECLARE_MSM_GPIO_PINS(38);
+DECLARE_MSM_GPIO_PINS(39);
+DECLARE_MSM_GPIO_PINS(40);
+DECLARE_MSM_GPIO_PINS(41);
+DECLARE_MSM_GPIO_PINS(42);
+DECLARE_MSM_GPIO_PINS(43);
+DECLARE_MSM_GPIO_PINS(44);
+DECLARE_MSM_GPIO_PINS(45);
+DECLARE_MSM_GPIO_PINS(46);
+DECLARE_MSM_GPIO_PINS(47);
+DECLARE_MSM_GPIO_PINS(48);
+DECLARE_MSM_GPIO_PINS(49);
+DECLARE_MSM_GPIO_PINS(50);
+DECLARE_MSM_GPIO_PINS(51);
+DECLARE_MSM_GPIO_PINS(52);
+DECLARE_MSM_GPIO_PINS(53);
+DECLARE_MSM_GPIO_PINS(54);
+DECLARE_MSM_GPIO_PINS(55);
+DECLARE_MSM_GPIO_PINS(56);
+DECLARE_MSM_GPIO_PINS(57);
+DECLARE_MSM_GPIO_PINS(58);
+DECLARE_MSM_GPIO_PINS(59);
+DECLARE_MSM_GPIO_PINS(60);
+DECLARE_MSM_GPIO_PINS(61);
+DECLARE_MSM_GPIO_PINS(62);
+DECLARE_MSM_GPIO_PINS(63);
+DECLARE_MSM_GPIO_PINS(64);
+DECLARE_MSM_GPIO_PINS(65);
+DECLARE_MSM_GPIO_PINS(66);
+DECLARE_MSM_GPIO_PINS(67);
+DECLARE_MSM_GPIO_PINS(68);
+DECLARE_MSM_GPIO_PINS(69);
+DECLARE_MSM_GPIO_PINS(70);
+DECLARE_MSM_GPIO_PINS(71);
+DECLARE_MSM_GPIO_PINS(72);
+DECLARE_MSM_GPIO_PINS(73);
+DECLARE_MSM_GPIO_PINS(74);
+DECLARE_MSM_GPIO_PINS(75);
+DECLARE_MSM_GPIO_PINS(76);
+DECLARE_MSM_GPIO_PINS(77);
+DECLARE_MSM_GPIO_PINS(78);
+DECLARE_MSM_GPIO_PINS(79);
+DECLARE_MSM_GPIO_PINS(80);
+DECLARE_MSM_GPIO_PINS(81);
+DECLARE_MSM_GPIO_PINS(82);
+DECLARE_MSM_GPIO_PINS(83);
+DECLARE_MSM_GPIO_PINS(84);
+DECLARE_MSM_GPIO_PINS(85);
+DECLARE_MSM_GPIO_PINS(86);
+DECLARE_MSM_GPIO_PINS(87);
+DECLARE_MSM_GPIO_PINS(88);
+DECLARE_MSM_GPIO_PINS(89);
+DECLARE_MSM_GPIO_PINS(90);
+DECLARE_MSM_GPIO_PINS(91);
+DECLARE_MSM_GPIO_PINS(92);
+DECLARE_MSM_GPIO_PINS(93);
+DECLARE_MSM_GPIO_PINS(94);
+DECLARE_MSM_GPIO_PINS(95);
+DECLARE_MSM_GPIO_PINS(96);
+DECLARE_MSM_GPIO_PINS(97);
+DECLARE_MSM_GPIO_PINS(98);
+DECLARE_MSM_GPIO_PINS(99);
+DECLARE_MSM_GPIO_PINS(100);
+DECLARE_MSM_GPIO_PINS(101);
+DECLARE_MSM_GPIO_PINS(102);
+DECLARE_MSM_GPIO_PINS(103);
+DECLARE_MSM_GPIO_PINS(104);
+DECLARE_MSM_GPIO_PINS(105);
+DECLARE_MSM_GPIO_PINS(106);
+DECLARE_MSM_GPIO_PINS(107);
+DECLARE_MSM_GPIO_PINS(108);
+DECLARE_MSM_GPIO_PINS(109);
+DECLARE_MSM_GPIO_PINS(110);
+DECLARE_MSM_GPIO_PINS(111);
+DECLARE_MSM_GPIO_PINS(112);
+DECLARE_MSM_GPIO_PINS(113);
+DECLARE_MSM_GPIO_PINS(114);
+DECLARE_MSM_GPIO_PINS(115);
+DECLARE_MSM_GPIO_PINS(116);
+DECLARE_MSM_GPIO_PINS(117);
+DECLARE_MSM_GPIO_PINS(118);
+DECLARE_MSM_GPIO_PINS(119);
+DECLARE_MSM_GPIO_PINS(120);
+DECLARE_MSM_GPIO_PINS(121);
+DECLARE_MSM_GPIO_PINS(122);
+DECLARE_MSM_GPIO_PINS(123);
+DECLARE_MSM_GPIO_PINS(124);
+DECLARE_MSM_GPIO_PINS(125);
+DECLARE_MSM_GPIO_PINS(126);
+DECLARE_MSM_GPIO_PINS(127);
+DECLARE_MSM_GPIO_PINS(128);
+DECLARE_MSM_GPIO_PINS(129);
+DECLARE_MSM_GPIO_PINS(130);
+DECLARE_MSM_GPIO_PINS(131);
+DECLARE_MSM_GPIO_PINS(132);
+DECLARE_MSM_GPIO_PINS(133);
+DECLARE_MSM_GPIO_PINS(134);
+DECLARE_MSM_GPIO_PINS(135);
+DECLARE_MSM_GPIO_PINS(136);
+DECLARE_MSM_GPIO_PINS(137);
+DECLARE_MSM_GPIO_PINS(138);
+DECLARE_MSM_GPIO_PINS(139);
+DECLARE_MSM_GPIO_PINS(140);
+DECLARE_MSM_GPIO_PINS(141);
+DECLARE_MSM_GPIO_PINS(142);
+DECLARE_MSM_GPIO_PINS(143);
+DECLARE_MSM_GPIO_PINS(144);
+DECLARE_MSM_GPIO_PINS(145);
+DECLARE_MSM_GPIO_PINS(146);
+DECLARE_MSM_GPIO_PINS(147);
+DECLARE_MSM_GPIO_PINS(148);
+DECLARE_MSM_GPIO_PINS(149);
+
+static const unsigned int sdc2_clk_pins[] = { 150 };	/* pin 150: "SDC2_CLK" */
+static const unsigned int sdc2_cmd_pins[] = { 151 };	/* pin 151: "SDC2_CMD" */
+static const unsigned int sdc2_data_pins[] = { 152 };	/* pin 152: "SDC2_DATA" */
+static const unsigned int ufs_reset_pins[] = { 153 };	/* pin 153: "UFS_RESET" */
+
+enum sdm845_functions { /* pinmux function selectors; order matches the sdm845_functions[] FUNCTION() table below */
+	msm_mux_ddr_pxi3,
+	msm_mux_cam_mclk,
+	msm_mux_pll_bypassnl,
+	msm_mux_qdss_gpio0,
+	msm_mux_pll_reset,
+	msm_mux_qdss_gpio1,
+	msm_mux_qdss_gpio2,
+	msm_mux_qdss_gpio3,
+	msm_mux_cci_i2c,
+	msm_mux_qup1,
+	msm_mux_qdss_gpio4,
+	msm_mux_qdss_gpio5,
+	msm_mux_qdss_gpio6,
+	msm_mux_qdss_gpio7,
+	msm_mux_cci_timer0,
+	msm_mux_gcc_gp2,
+	msm_mux_qdss_gpio8,
+	msm_mux_cci_timer1,
+	msm_mux_gcc_gp3,
+	msm_mux_qdss_gpio,
+	msm_mux_cci_timer2,
+	msm_mux_qdss_gpio9,
+	msm_mux_cci_timer3,
+	msm_mux_cci_async,
+	msm_mux_qdss_gpio10,
+	msm_mux_cci_timer4,
+	msm_mux_qdss_gpio11,
+	msm_mux_qdss_gpio12,
+	msm_mux_qup2,
+	msm_mux_qdss_gpio13,
+	msm_mux_qdss_gpio14,
+	msm_mux_phase_flag1,
+	msm_mux_qdss_gpio15,
+	msm_mux_phase_flag2,
+	msm_mux_qup11,
+	msm_mux_qup14,
+	msm_mux_pci_e0,
+	msm_mux_jitter_bist,
+	msm_mux_pll_bist,
+	msm_mux_atest_tsens,
+	msm_mux_agera_pll,
+	msm_mux_usb_phy,
+	msm_mux_lpass_slimbus,
+	msm_mux_sd_write,
+	msm_mux_tsif1_error,
+	msm_mux_qup3,
+	msm_mux_qup6,
+	msm_mux_qup12,
+	msm_mux_phase_flag16,
+	msm_mux_qup10,
+	msm_mux_phase_flag11,
+	msm_mux_phase_flag12,
+	msm_mux_phase_flag13,
+	msm_mux_phase_flag17,
+	msm_mux_qua_mi2s,
+	msm_mux_gcc_gp1,
+	msm_mux_phase_flag18,
+	msm_mux_phase_flag19,
+	msm_mux_phase_flag20,
+	msm_mux_cri_trng0,
+	msm_mux_phase_flag21,
+	msm_mux_cri_trng1,
+	msm_mux_phase_flag22,
+	msm_mux_cri_trng,
+	msm_mux_phase_flag23,
+	msm_mux_phase_flag24,
+	msm_mux_pri_mi2s,
+	msm_mux_sp_cmu,
+	msm_mux_phase_flag25,
+	msm_mux_qup8,
+	msm_mux_pri_mi2s_ws,
+	msm_mux_spkr_i2s,
+	msm_mux_audio_ref,
+	msm_mux_tsense_pwm1,
+	msm_mux_tsense_pwm2,
+	msm_mux_btfm_slimbus,
+	msm_mux_atest_usb2,
+	msm_mux_ter_mi2s,
+	msm_mux_phase_flag7,
+	msm_mux_atest_usb23,
+	msm_mux_phase_flag8,
+	msm_mux_atest_usb22,
+	msm_mux_phase_flag9,
+	msm_mux_atest_usb21,
+	msm_mux_phase_flag4,
+	msm_mux_atest_usb20,
+	msm_mux_sec_mi2s,
+	msm_mux_qup15,
+	msm_mux_qup5,
+	msm_mux_tsif1_clk,
+	msm_mux_qup4,
+	msm_mux_qspi_cs,
+	msm_mux_tgu_ch3,
+	msm_mux_phase_flag10,
+	msm_mux_tsif1_en,
+	msm_mux_mdp_vsync0,
+	msm_mux_mdp_vsync1,
+	msm_mux_mdp_vsync2,
+	msm_mux_mdp_vsync3,
+	msm_mux_tgu_ch0,
+	msm_mux_phase_flag0,
+	msm_mux_tsif1_data,
+	msm_mux_sdc4_cmd,
+	msm_mux_qspi0,
+	msm_mux_tgu_ch1,
+	msm_mux_tsif2_error,
+	msm_mux_sdc43,
+	msm_mux_qspi1,
+	msm_mux_vfr_1,
+	msm_mux_tgu_ch2,
+	msm_mux_tsif2_clk,
+	msm_mux_sdc4_clk,
+	msm_mux_qup7,
+	msm_mux_qspi2,
+	msm_mux_tsif2_en,
+	msm_mux_sdc42,
+	msm_mux_qspi3,
+	msm_mux_tsif2_data,
+	msm_mux_sdc41,
+	msm_mux_qspi_clk,
+	msm_mux_tsif2_sync,
+	msm_mux_sdc40,
+	msm_mux_phase_flag3,
+	msm_mux_ldo_en,
+	msm_mux_ldo_update,
+	msm_mux_phase_flag14,
+	msm_mux_phase_flag15,
+	msm_mux_pci_e1,
+	msm_mux_prng_rosc,
+	msm_mux_phase_flag5,
+	msm_mux_uim2_data,
+	msm_mux_qup13,
+	msm_mux_uim2_clk,
+	msm_mux_uim2_reset,
+	msm_mux_uim2_present,
+	msm_mux_uim1_data,
+	msm_mux_uim1_clk,
+	msm_mux_uim1_reset,
+	msm_mux_uim1_present,
+	msm_mux_uim_batt,
+	msm_mux_edp_hot,
+	msm_mux_nav_pps,
+	msm_mux_atest_char,
+	msm_mux_adsp_ext,
+	msm_mux_atest_char3,
+	msm_mux_atest_char2,
+	msm_mux_atest_char1,
+	msm_mux_atest_char0,
+	msm_mux_qlink_request,
+	msm_mux_qlink_enable,
+	msm_mux_pa_indicator,
+	msm_mux_phase_flag26,
+	msm_mux_phase_flag27,
+	msm_mux_phase_flag28,
+	msm_mux_phase_flag6,
+	msm_mux_phase_flag29,
+	msm_mux_phase_flag30,
+	msm_mux_phase_flag31,
+	msm_mux_mss_lte,
+	msm_mux_qup0,
+	msm_mux_gpio,
+	msm_mux_qup9,
+	msm_mux_qdss_cti,
+	msm_mux_ddr_pxi0,
+	msm_mux_ddr_bist,
+	msm_mux_atest_tsens2,
+	msm_mux_vsense_trigger,
+	msm_mux_atest_usb1,
+	msm_mux_qup_l4,
+	msm_mux_wlan1_adc1,
+	msm_mux_atest_usb13,
+	msm_mux_ddr_pxi1,
+	msm_mux_qup_l5,
+	msm_mux_wlan1_adc0,
+	msm_mux_atest_usb12,
+	msm_mux_mdp_vsync,
+	msm_mux_qup_l6,
+	msm_mux_wlan2_adc1,
+	msm_mux_atest_usb11,
+	msm_mux_ddr_pxi2,
+	msm_mux_edp_lcd,
+	msm_mux_dbg_out,
+	msm_mux_wlan2_adc0,
+	msm_mux_atest_usb10,
+	msm_mux_m_voc,
+	msm_mux_tsif1_sync,
+	msm_mux_NA, /* last entry: placeholder used for the NA slots in sdm845_groups[] — mapping macro defined elsewhere */
+};
+
+static const char * const ddr_pxi3_groups[] = { /* per-function lists of the pin groups that can select it */
+	"gpio12", "gpio13",
+};
+static const char * const cam_mclk_groups[] = {
+	"gpio13", "gpio14", "gpio15", "gpio16",
+};
+static const char * const pll_bypassnl_groups[] = {
+	"gpio13",
+};
+static const char * const qdss_gpio0_groups[] = {
+	"gpio13", "gpio117",
+};
+static const char * const pll_reset_groups[] = {
+	"gpio14",
+};
+static const char * const qdss_gpio1_groups[] = {
+	"gpio14", "gpio118",
+};
+static const char * const qdss_gpio2_groups[] = {
+	"gpio15", "gpio119",
+};
+static const char * const qdss_gpio3_groups[] = {
+	"gpio16", "gpio120",
+};
+static const char * const cci_i2c_groups[] = {
+	"gpio17", "gpio18", "gpio19", "gpio20",
+};
+static const char * const qup1_groups[] = {
+	"gpio17", "gpio18", "gpio19", "gpio20",
+};
+static const char * const qdss_gpio4_groups[] = {
+	"gpio17", "gpio121",
+};
+static const char * const qdss_gpio5_groups[] = {
+	"gpio18", "gpio122",
+};
+static const char * const qdss_gpio6_groups[] = {
+	"gpio19", "gpio41",
+};
+static const char * const qdss_gpio7_groups[] = {
+	"gpio20", "gpio42",
+};
+static const char * const cci_timer0_groups[] = {
+	"gpio21",
+};
+static const char * const gcc_gp2_groups[] = {
+	"gpio21", "gpio58",
+};
+static const char * const qdss_gpio8_groups[] = {
+	"gpio21", "gpio75",
+};
+static const char * const cci_timer1_groups[] = {
+	"gpio22",
+};
+static const char * const gcc_gp3_groups[] = {
+	"gpio22", "gpio59",
+};
+static const char * const qdss_gpio_groups[] = {
+	"gpio22", "gpio30", "gpio123", "gpio124",
+};
+static const char * const cci_timer2_groups[] = {
+	"gpio23",
+};
+static const char * const qdss_gpio9_groups[] = {
+	"gpio23", "gpio76",
+};
+static const char * const cci_timer3_groups[] = {
+	"gpio24",
+};
+static const char * const cci_async_groups[] = {
+	"gpio24", "gpio25", "gpio26",
+};
+static const char * const qdss_gpio10_groups[] = {
+	"gpio24", "gpio77",
+};
+static const char * const cci_timer4_groups[] = {
+	"gpio25",
+};
+static const char * const qdss_gpio11_groups[] = {
+	"gpio25", "gpio79",
+};
+static const char * const qdss_gpio12_groups[] = {
+	"gpio26", "gpio80",
+};
+static const char * const qup2_groups[] = {
+	"gpio27", "gpio28", "gpio29", "gpio30",
+};
+static const char * const qdss_gpio13_groups[] = {
+	"gpio27", "gpio93",
+};
+static const char * const qdss_gpio14_groups[] = {
+	"gpio28", "gpio43",
+};
+static const char * const phase_flag1_groups[] = {
+	"gpio29",
+};
+static const char * const qdss_gpio15_groups[] = {
+	"gpio29", "gpio44",
+};
+static const char * const phase_flag2_groups[] = {
+	"gpio30",
+};
+static const char * const qup11_groups[] = {
+	"gpio31", "gpio32", "gpio33", "gpio34",
+};
+static const char * const qup14_groups[] = {
+	"gpio31", "gpio32", "gpio33", "gpio34",
+};
+static const char * const pci_e0_groups[] = {
+	"gpio35", "gpio36",
+};
+static const char * const jitter_bist_groups[] = {
+	"gpio35",
+};
+static const char * const pll_bist_groups[] = {
+	"gpio36",
+};
+static const char * const atest_tsens_groups[] = {
+	"gpio36",
+};
+static const char * const agera_pll_groups[] = {
+	"gpio37",
+};
+static const char * const usb_phy_groups[] = {
+	"gpio38",
+};
+static const char * const lpass_slimbus_groups[] = {
+	"gpio39", "gpio70", "gpio71", "gpio72",
+};
+static const char * const sd_write_groups[] = {
+	"gpio40",
+};
+static const char * const tsif1_error_groups[] = {
+	"gpio40",
+};
+static const char * const qup3_groups[] = {
+	"gpio41", "gpio42", "gpio43", "gpio44",
+};
+static const char * const qup6_groups[] = {
+	"gpio45", "gpio46", "gpio47", "gpio48",
+};
+static const char * const qup12_groups[] = {
+	"gpio49", "gpio50", "gpio51", "gpio52",
+};
+static const char * const phase_flag16_groups[] = {
+	"gpio52",
+};
+static const char * const qup10_groups[] = {
+	"gpio53", "gpio54", "gpio55", "gpio56",
+};
+static const char * const phase_flag11_groups[] = {
+	"gpio53",
+};
+static const char * const phase_flag12_groups[] = {
+	"gpio54",
+};
+static const char * const phase_flag13_groups[] = {
+	"gpio55",
+};
+static const char * const phase_flag17_groups[] = {
+	"gpio56",
+};
+static const char * const qua_mi2s_groups[] = {
+	"gpio57", "gpio58", "gpio59", "gpio60", "gpio61", "gpio62", "gpio63",
+};
+static const char * const gcc_gp1_groups[] = {
+	"gpio57", "gpio78",
+};
+static const char * const phase_flag18_groups[] = {
+	"gpio57",
+};
+static const char * const phase_flag19_groups[] = {
+	"gpio58",
+};
+static const char * const phase_flag20_groups[] = {
+	"gpio59",
+};
+static const char * const cri_trng0_groups[] = {
+	"gpio60",
+};
+static const char * const phase_flag21_groups[] = {
+	"gpio60",
+};
+static const char * const cri_trng1_groups[] = {
+	"gpio61",
+};
+static const char * const phase_flag22_groups[] = {
+	"gpio61",
+};
+static const char * const cri_trng_groups[] = {
+	"gpio62",
+};
+static const char * const phase_flag23_groups[] = {
+	"gpio62",
+};
+static const char * const phase_flag24_groups[] = {
+	"gpio63",
+};
+static const char * const pri_mi2s_groups[] = {
+	"gpio64", "gpio65", "gpio67", "gpio68",
+};
+static const char * const sp_cmu_groups[] = {
+	"gpio64",
+};
+static const char * const phase_flag25_groups[] = {
+	"gpio64",
+};
+static const char * const qup8_groups[] = {
+	"gpio65", "gpio66", "gpio67", "gpio68",
+};
+static const char * const pri_mi2s_ws_groups[] = {
+	"gpio66",
+};
+static const char * const spkr_i2s_groups[] = {
+	"gpio69", "gpio70", "gpio71", "gpio72",
+};
+static const char * const audio_ref_groups[] = {
+	"gpio69",
+};
+static const char * const tsense_pwm1_groups[] = {
+	"gpio71",
+};
+static const char * const tsense_pwm2_groups[] = {
+	"gpio71",
+};
+static const char * const btfm_slimbus_groups[] = {
+	"gpio73", "gpio74",
+};
+static const char * const atest_usb2_groups[] = {
+	"gpio73",
+};
+static const char * const ter_mi2s_groups[] = {
+	"gpio74", "gpio75", "gpio76", "gpio77", "gpio78",
+};
+static const char * const phase_flag7_groups[] = {
+	"gpio74",
+};
+static const char * const atest_usb23_groups[] = {
+	"gpio74",
+};
+static const char * const phase_flag8_groups[] = {
+	"gpio75",
+};
+static const char * const atest_usb22_groups[] = {
+	"gpio75",
+};
+static const char * const phase_flag9_groups[] = {
+	"gpio76",
+};
+static const char * const atest_usb21_groups[] = {
+	"gpio76",
+};
+static const char * const phase_flag4_groups[] = {
+	"gpio77",
+};
+static const char * const atest_usb20_groups[] = {
+	"gpio77",
+};
+static const char * const sec_mi2s_groups[] = {
+	"gpio79", "gpio80", "gpio81", "gpio82", "gpio83",
+};
+static const char * const qup15_groups[] = {
+	"gpio81", "gpio82", "gpio83", "gpio84",
+};
+static const char * const qup5_groups[] = {
+	"gpio85", "gpio86", "gpio87", "gpio88",
+};
+static const char * const tsif1_clk_groups[] = {
+	"gpio89",
+};
+static const char * const qup4_groups[] = {
+	"gpio89", "gpio90", "gpio91", "gpio92",
+};
+static const char * const qspi_cs_groups[] = {
+	"gpio89", "gpio90",
+};
+static const char * const tgu_ch3_groups[] = {
+	"gpio89",
+};
+static const char * const phase_flag10_groups[] = {
+	"gpio89",
+};
+static const char * const tsif1_en_groups[] = {
+	"gpio90",
+};
+static const char * const mdp_vsync0_groups[] = {
+	"gpio90",
+};
+static const char * const mdp_vsync1_groups[] = {
+	"gpio90",
+};
+static const char * const mdp_vsync2_groups[] = {
+	"gpio90",
+};
+static const char * const mdp_vsync3_groups[] = {
+	"gpio90",
+};
+static const char * const tgu_ch0_groups[] = {
+	"gpio90",
+};
+static const char * const phase_flag0_groups[] = {
+	"gpio90",
+};
+static const char * const tsif1_data_groups[] = {
+	"gpio91",
+};
+static const char * const sdc4_cmd_groups[] = {
+	"gpio91",
+};
+static const char * const qspi0_groups[] = {
+	"gpio91",
+};
+static const char * const tgu_ch1_groups[] = {
+	"gpio91",
+};
+static const char * const tsif2_error_groups[] = {
+	"gpio92",
+};
+static const char * const sdc43_groups[] = {
+	"gpio92",
+};
+static const char * const qspi1_groups[] = {
+	"gpio92",
+};
+static const char * const vfr_1_groups[] = {
+	"gpio92",
+};
+static const char * const tgu_ch2_groups[] = {
+	"gpio92",
+};
+static const char * const tsif2_clk_groups[] = {
+	"gpio93",
+};
+static const char * const sdc4_clk_groups[] = {
+	"gpio93",
+};
+static const char * const qup7_groups[] = {
+	"gpio93", "gpio94", "gpio95", "gpio96",
+};
+static const char * const qspi2_groups[] = {
+	"gpio93",
+};
+static const char * const tsif2_en_groups[] = {
+	"gpio94",
+};
+static const char * const sdc42_groups[] = {
+	"gpio94",
+};
+static const char * const qspi3_groups[] = {
+	"gpio94",
+};
+static const char * const tsif2_data_groups[] = {
+	"gpio95",
+};
+static const char * const sdc41_groups[] = {
+	"gpio95",
+};
+static const char * const qspi_clk_groups[] = {
+	"gpio95",
+};
+static const char * const tsif2_sync_groups[] = {
+	"gpio96",
+};
+static const char * const sdc40_groups[] = {
+	"gpio96",
+};
+static const char * const phase_flag3_groups[] = {
+	"gpio96",
+};
+static const char * const ldo_en_groups[] = {
+	"gpio97",
+};
+static const char * const ldo_update_groups[] = {
+	"gpio98",
+};
+static const char * const phase_flag14_groups[] = {
+	"gpio99",
+};
+static const char * const phase_flag15_groups[] = {
+	"gpio100",
+};
+static const char * const pci_e1_groups[] = {
+	"gpio102", "gpio103",
+};
+static const char * const prng_rosc_groups[] = {
+	"gpio102",
+};
+static const char * const phase_flag5_groups[] = {
+	"gpio103",
+};
+static const char * const uim2_data_groups[] = {
+	"gpio105",
+};
+static const char * const qup13_groups[] = {
+	"gpio105", "gpio106", "gpio107", "gpio108",
+};
+static const char * const uim2_clk_groups[] = {
+	"gpio106",
+};
+static const char * const uim2_reset_groups[] = {
+	"gpio107",
+};
+static const char * const uim2_present_groups[] = {
+	"gpio108",
+};
+static const char * const uim1_data_groups[] = {
+	"gpio109",
+};
+static const char * const uim1_clk_groups[] = {
+	"gpio110",
+};
+static const char * const uim1_reset_groups[] = {
+	"gpio111",
+};
+static const char * const uim1_present_groups[] = {
+	"gpio112",
+};
+static const char * const uim_batt_groups[] = {
+	"gpio113",
+};
+static const char * const edp_hot_groups[] = {
+	"gpio113",
+};
+static const char * const nav_pps_groups[] = { /* NOTE(review): entries are listed twice each — matches vendor table; verify intentional */
+	"gpio114", "gpio114", "gpio115", "gpio115", "gpio128", "gpio128",
+	"gpio129", "gpio129", "gpio143", "gpio143",
+};
+static const char * const atest_char_groups[] = {
+	"gpio117",
+};
+static const char * const adsp_ext_groups[] = {
+	"gpio118",
+};
+static const char * const atest_char3_groups[] = {
+	"gpio118",
+};
+static const char * const atest_char2_groups[] = {
+	"gpio119",
+};
+static const char * const atest_char1_groups[] = {
+	"gpio120",
+};
+static const char * const atest_char0_groups[] = {
+	"gpio121",
+};
+static const char * const qlink_request_groups[] = {
+	"gpio130",
+};
+static const char * const qlink_enable_groups[] = {
+	"gpio131",
+};
+static const char * const pa_indicator_groups[] = {
+	"gpio135",
+};
+static const char * const phase_flag26_groups[] = {
+	"gpio137",
+};
+static const char * const phase_flag27_groups[] = {
+	"gpio138",
+};
+static const char * const phase_flag28_groups[] = {
+	"gpio139",
+};
+static const char * const phase_flag6_groups[] = {
+	"gpio140",
+};
+static const char * const phase_flag29_groups[] = {
+	"gpio141",
+};
+static const char * const phase_flag30_groups[] = {
+	"gpio142",
+};
+static const char * const phase_flag31_groups[] = {
+	"gpio143",
+};
+static const char * const mss_lte_groups[] = {
+	"gpio144", "gpio145",
+};
+static const char * const qup0_groups[] = {
+	"gpio0", "gpio1", "gpio2", "gpio3",
+};
+static const char * const gpio_groups[] = { /* plain-GPIO function: every muxable pin group (gpio0..gpio149) */
+	"gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7",
+	"gpio8", "gpio9", "gpio10", "gpio11", "gpio12", "gpio13", "gpio14",
+	"gpio15", "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21",
+	"gpio22", "gpio23", "gpio24", "gpio25", "gpio26", "gpio27", "gpio28",
+	"gpio29", "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35",
+	"gpio36", "gpio37", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42",
+	"gpio43", "gpio44", "gpio45", "gpio46", "gpio47", "gpio48", "gpio49",
+	"gpio50", "gpio51", "gpio52", "gpio53", "gpio54", "gpio55", "gpio56",
+	"gpio57", "gpio58", "gpio59", "gpio60", "gpio61", "gpio62", "gpio63",
+	"gpio64", "gpio65", "gpio66", "gpio67", "gpio68", "gpio69", "gpio70",
+	"gpio71", "gpio72", "gpio73", "gpio74", "gpio75", "gpio76", "gpio77",
+	"gpio78", "gpio79", "gpio80", "gpio81", "gpio82", "gpio83", "gpio84",
+	"gpio85", "gpio86", "gpio87", "gpio88", "gpio89", "gpio90", "gpio91",
+	"gpio92", "gpio93", "gpio94", "gpio95", "gpio96", "gpio97", "gpio98",
+	"gpio99", "gpio100", "gpio101", "gpio102", "gpio103", "gpio104",
+	"gpio105", "gpio106", "gpio107", "gpio108", "gpio109", "gpio110",
+	"gpio111", "gpio112", "gpio113", "gpio114", "gpio115", "gpio116",
+	"gpio117", "gpio118", "gpio119", "gpio120", "gpio121", "gpio122",
+	"gpio123", "gpio124", "gpio125", "gpio126", "gpio127", "gpio128",
+	"gpio129", "gpio130", "gpio131", "gpio132", "gpio133", "gpio134",
+	"gpio135", "gpio136", "gpio137", "gpio138", "gpio139", "gpio140",
+	"gpio141", "gpio142", "gpio143", "gpio144", "gpio145", "gpio146",
+	"gpio147", "gpio148", "gpio149",
+};
+static const char * const qup9_groups[] = {
+	"gpio4", "gpio5", "gpio6", "gpio7",
+};
+static const char * const qdss_cti_groups[] = {
+	"gpio4", "gpio5", "gpio51", "gpio52", "gpio62", "gpio63", "gpio90",
+	"gpio91",
+};
+static const char * const ddr_pxi0_groups[] = {
+	"gpio6", "gpio7",
+};
+static const char * const ddr_bist_groups[] = {
+	"gpio7", "gpio8", "gpio9", "gpio10",
+};
+static const char * const atest_tsens2_groups[] = {
+	"gpio7",
+};
+static const char * const vsense_trigger_groups[] = {
+	"gpio7",
+};
+static const char * const atest_usb1_groups[] = {
+	"gpio7",
+};
+static const char * const qup_l4_groups[] = {
+	"gpio8", "gpio35", "gpio105", "gpio123",
+};
+static const char * const wlan1_adc1_groups[] = {
+	"gpio8",
+};
+static const char * const atest_usb13_groups[] = {
+	"gpio8",
+};
+static const char * const ddr_pxi1_groups[] = {
+	"gpio8", "gpio9",
+};
+static const char * const qup_l5_groups[] = {
+	"gpio9", "gpio36", "gpio106", "gpio124",
+};
+static const char * const wlan1_adc0_groups[] = {
+	"gpio9",
+};
+static const char * const atest_usb12_groups[] = {
+	"gpio9",
+};
+static const char * const mdp_vsync_groups[] = {
+	"gpio10", "gpio11", "gpio12", "gpio97", "gpio98",
+};
+static const char * const qup_l6_groups[] = {
+	"gpio10", "gpio37", "gpio107", "gpio125",
+};
+static const char * const wlan2_adc1_groups[] = {
+	"gpio10",
+};
+static const char * const atest_usb11_groups[] = {
+	"gpio10",
+};
+static const char * const ddr_pxi2_groups[] = {
+	"gpio10", "gpio11",
+};
+static const char * const edp_lcd_groups[] = {
+	"gpio11",
+};
+static const char * const dbg_out_groups[] = {
+	"gpio11",
+};
+static const char * const wlan2_adc0_groups[] = {
+	"gpio11",
+};
+static const char * const atest_usb10_groups[] = {
+	"gpio11",
+};
+static const char * const m_voc_groups[] = {
+	"gpio12",
+};
+static const char * const tsif1_sync_groups[] = {
+	"gpio12",
+};
+
+static const struct msm_function sdm845_functions[] = { /* one FUNCTION() per enum entry above, same order (FUNCTION macro defined elsewhere) */
+	FUNCTION(ddr_pxi3),
+	FUNCTION(cam_mclk),
+	FUNCTION(pll_bypassnl),
+	FUNCTION(qdss_gpio0),
+	FUNCTION(pll_reset),
+	FUNCTION(qdss_gpio1),
+	FUNCTION(qdss_gpio2),
+	FUNCTION(qdss_gpio3),
+	FUNCTION(cci_i2c),
+	FUNCTION(qup1),
+	FUNCTION(qdss_gpio4),
+	FUNCTION(qdss_gpio5),
+	FUNCTION(qdss_gpio6),
+	FUNCTION(qdss_gpio7),
+	FUNCTION(cci_timer0),
+	FUNCTION(gcc_gp2),
+	FUNCTION(qdss_gpio8),
+	FUNCTION(cci_timer1),
+	FUNCTION(gcc_gp3),
+	FUNCTION(qdss_gpio),
+	FUNCTION(cci_timer2),
+	FUNCTION(qdss_gpio9),
+	FUNCTION(cci_timer3),
+	FUNCTION(cci_async),
+	FUNCTION(qdss_gpio10),
+	FUNCTION(cci_timer4),
+	FUNCTION(qdss_gpio11),
+	FUNCTION(qdss_gpio12),
+	FUNCTION(qup2),
+	FUNCTION(qdss_gpio13),
+	FUNCTION(qdss_gpio14),
+	FUNCTION(phase_flag1),
+	FUNCTION(qdss_gpio15),
+	FUNCTION(phase_flag2),
+	FUNCTION(qup11),
+	FUNCTION(qup14),
+	FUNCTION(pci_e0),
+	FUNCTION(jitter_bist),
+	FUNCTION(pll_bist),
+	FUNCTION(atest_tsens),
+	FUNCTION(agera_pll),
+	FUNCTION(usb_phy),
+	FUNCTION(lpass_slimbus),
+	FUNCTION(sd_write),
+	FUNCTION(tsif1_error),
+	FUNCTION(qup3),
+	FUNCTION(qup6),
+	FUNCTION(qup12),
+	FUNCTION(phase_flag16),
+	FUNCTION(qup10),
+	FUNCTION(phase_flag11),
+	FUNCTION(phase_flag12),
+	FUNCTION(phase_flag13),
+	FUNCTION(phase_flag17),
+	FUNCTION(qua_mi2s),
+	FUNCTION(gcc_gp1),
+	FUNCTION(phase_flag18),
+	FUNCTION(phase_flag19),
+	FUNCTION(phase_flag20),
+	FUNCTION(cri_trng0),
+	FUNCTION(phase_flag21),
+	FUNCTION(cri_trng1),
+	FUNCTION(phase_flag22),
+	FUNCTION(cri_trng),
+	FUNCTION(phase_flag23),
+	FUNCTION(phase_flag24),
+	FUNCTION(pri_mi2s),
+	FUNCTION(sp_cmu),
+	FUNCTION(phase_flag25),
+	FUNCTION(qup8),
+	FUNCTION(pri_mi2s_ws),
+	FUNCTION(spkr_i2s),
+	FUNCTION(audio_ref),
+	FUNCTION(tsense_pwm1),
+	FUNCTION(tsense_pwm2),
+	FUNCTION(btfm_slimbus),
+	FUNCTION(atest_usb2),
+	FUNCTION(ter_mi2s),
+	FUNCTION(phase_flag7),
+	FUNCTION(atest_usb23),
+	FUNCTION(phase_flag8),
+	FUNCTION(atest_usb22),
+	FUNCTION(phase_flag9),
+	FUNCTION(atest_usb21),
+	FUNCTION(phase_flag4),
+	FUNCTION(atest_usb20),
+	FUNCTION(sec_mi2s),
+	FUNCTION(qup15),
+	FUNCTION(qup5),
+	FUNCTION(tsif1_clk),
+	FUNCTION(qup4),
+	FUNCTION(qspi_cs),
+	FUNCTION(tgu_ch3),
+	FUNCTION(phase_flag10),
+	FUNCTION(tsif1_en),
+	FUNCTION(mdp_vsync0),
+	FUNCTION(mdp_vsync1),
+	FUNCTION(mdp_vsync2),
+	FUNCTION(mdp_vsync3),
+	FUNCTION(tgu_ch0),
+	FUNCTION(phase_flag0),
+	FUNCTION(tsif1_data),
+	FUNCTION(sdc4_cmd),
+	FUNCTION(qspi0),
+	FUNCTION(tgu_ch1),
+	FUNCTION(tsif2_error),
+	FUNCTION(sdc43),
+	FUNCTION(qspi1),
+	FUNCTION(vfr_1),
+	FUNCTION(tgu_ch2),
+	FUNCTION(tsif2_clk),
+	FUNCTION(sdc4_clk),
+	FUNCTION(qup7),
+	FUNCTION(qspi2),
+	FUNCTION(tsif2_en),
+	FUNCTION(sdc42),
+	FUNCTION(qspi3),
+	FUNCTION(tsif2_data),
+	FUNCTION(sdc41),
+	FUNCTION(qspi_clk),
+	FUNCTION(tsif2_sync),
+	FUNCTION(sdc40),
+	FUNCTION(phase_flag3),
+	FUNCTION(ldo_en),
+	FUNCTION(ldo_update),
+	FUNCTION(phase_flag14),
+	FUNCTION(phase_flag15),
+	FUNCTION(pci_e1),
+	FUNCTION(prng_rosc),
+	FUNCTION(phase_flag5),
+	FUNCTION(uim2_data),
+	FUNCTION(qup13),
+	FUNCTION(uim2_clk),
+	FUNCTION(uim2_reset),
+	FUNCTION(uim2_present),
+	FUNCTION(uim1_data),
+	FUNCTION(uim1_clk),
+	FUNCTION(uim1_reset),
+	FUNCTION(uim1_present),
+	FUNCTION(uim_batt),
+	FUNCTION(edp_hot),
+	FUNCTION(nav_pps),
+	FUNCTION(atest_char),
+	FUNCTION(adsp_ext),
+	FUNCTION(atest_char3),
+	FUNCTION(atest_char2),
+	FUNCTION(atest_char1),
+	FUNCTION(atest_char0),
+	FUNCTION(qlink_request),
+	FUNCTION(qlink_enable),
+	FUNCTION(pa_indicator),
+	FUNCTION(phase_flag26),
+	FUNCTION(phase_flag27),
+	FUNCTION(phase_flag28),
+	FUNCTION(phase_flag6),
+	FUNCTION(phase_flag29),
+	FUNCTION(phase_flag30),
+	FUNCTION(phase_flag31),
+	FUNCTION(mss_lte),
+	FUNCTION(qup0),
+	FUNCTION(gpio),
+	FUNCTION(qup9),
+	FUNCTION(qdss_cti),
+	FUNCTION(ddr_pxi0),
+	FUNCTION(ddr_bist),
+	FUNCTION(atest_tsens2),
+	FUNCTION(vsense_trigger),
+	FUNCTION(atest_usb1),
+	FUNCTION(qup_l4),
+	FUNCTION(wlan1_adc1),
+	FUNCTION(atest_usb13),
+	FUNCTION(ddr_pxi1),
+	FUNCTION(qup_l5),
+	FUNCTION(wlan1_adc0),
+	FUNCTION(atest_usb12),
+	FUNCTION(mdp_vsync),
+	FUNCTION(qup_l6),
+	FUNCTION(wlan2_adc1),
+	FUNCTION(atest_usb11),
+	FUNCTION(ddr_pxi2),
+	FUNCTION(edp_lcd),
+	FUNCTION(dbg_out),
+	FUNCTION(wlan2_adc0),
+	FUNCTION(atest_usb10),
+	FUNCTION(m_voc),
+	FUNCTION(tsif1_sync),
+};
+
+/* Every pin is maintained as a single group; a missing or non-existent pin
+ * is kept as a dummy group so that the pin-group index stays in sync with
+ * the pin descriptors registered with the pinctrl core.
+ * Clients cannot request these dummy pin groups.
+ */
+static const struct msm_pingroup sdm845_groups[] = {
+	[0] = PINGROUP(0, EAST, qup0, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[1] = PINGROUP(1, EAST, qup0, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[2] = PINGROUP(2, EAST, qup0, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[3] = PINGROUP(3, EAST, qup0, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[4] = PINGROUP(4, NORTH, qup9, qdss_cti, NA, NA, NA, NA, NA, NA, NA,
+		       NA),
+	[5] = PINGROUP(5, NORTH, qup9, qdss_cti, NA, NA, NA, NA, NA, NA, NA,
+		       NA),
+	[6] = PINGROUP(6, NORTH, qup9, NA, ddr_pxi0, NA, NA, NA, NA, NA, NA,
+		       NA),
+	[7] = PINGROUP(7, NORTH, qup9, ddr_bist, NA, atest_tsens2,
+		       vsense_trigger, atest_usb1, ddr_pxi0, NA, NA, NA),
+	[8] = PINGROUP(8, EAST, qup_l4, NA, ddr_bist, NA, NA, wlan1_adc1,
+		       atest_usb13, ddr_pxi1, NA, NA),
+	[9] = PINGROUP(9, EAST, qup_l5, ddr_bist, NA, wlan1_adc0, atest_usb12,
+		       ddr_pxi1, NA, NA, NA, NA),
+	[10] = PINGROUP(10, EAST, mdp_vsync, qup_l6, ddr_bist, wlan2_adc1,
+			atest_usb11, ddr_pxi2, NA, NA, NA, NA),
+	[11] = PINGROUP(11, EAST, mdp_vsync, edp_lcd, dbg_out, wlan2_adc0,
+			atest_usb10, ddr_pxi2, NA, NA, NA, NA),
+	[12] = PINGROUP(12, SOUTH, mdp_vsync, m_voc, tsif1_sync, ddr_pxi3, NA,
+			NA, NA, NA, NA, NA),
+	[13] = PINGROUP(13, SOUTH, cam_mclk, pll_bypassnl, qdss_gpio0,
+			ddr_pxi3, NA, NA, NA, NA, NA, NA),
+	[14] = PINGROUP(14, SOUTH, cam_mclk, pll_reset, qdss_gpio1, NA, NA, NA,
+			NA, NA, NA, NA),
+	[15] = PINGROUP(15, SOUTH, cam_mclk, qdss_gpio2, NA, NA, NA, NA, NA,
+			NA, NA, NA),
+	[16] = PINGROUP(16, SOUTH, cam_mclk, qdss_gpio3, NA, NA, NA, NA, NA,
+			NA, NA, NA),
+	[17] = PINGROUP(17, SOUTH, cci_i2c, qup1, qdss_gpio4, NA, NA, NA, NA,
+			NA, NA, NA),
+	[18] = PINGROUP(18, SOUTH, cci_i2c, qup1, NA, qdss_gpio5, NA, NA, NA,
+			NA, NA, NA),
+	[19] = PINGROUP(19, SOUTH, cci_i2c, qup1, NA, qdss_gpio6, NA, NA, NA,
+			NA, NA, NA),
+	[20] = PINGROUP(20, SOUTH, cci_i2c, qup1, NA, qdss_gpio7, NA, NA, NA,
+			NA, NA, NA),
+	[21] = PINGROUP(21, SOUTH, cci_timer0, gcc_gp2, qdss_gpio8, NA, NA, NA,
+			NA, NA, NA, NA),
+	[22] = PINGROUP(22, SOUTH, cci_timer1, gcc_gp3, qdss_gpio, NA, NA, NA,
+			NA, NA, NA, NA),
+	[23] = PINGROUP(23, SOUTH, cci_timer2, qdss_gpio9, NA, NA, NA, NA, NA,
+			NA, NA, NA),
+	[24] = PINGROUP(24, SOUTH, cci_timer3, cci_async, qdss_gpio10, NA, NA,
+			NA, NA, NA, NA, NA),
+	[25] = PINGROUP(25, SOUTH, cci_timer4, cci_async, qdss_gpio11, NA, NA,
+			NA, NA, NA, NA, NA),
+	[26] = PINGROUP(26, SOUTH, cci_async, qdss_gpio12, NA, NA, NA, NA, NA,
+			NA, NA, NA),
+	[27] = PINGROUP(27, EAST, qup2, qdss_gpio13, NA, NA, NA, NA, NA, NA,
+			NA, NA),
+	[28] = PINGROUP(28, EAST, qup2, qdss_gpio14, NA, NA, NA, NA, NA, NA,
+			NA, NA),
+	[29] = PINGROUP(29, EAST, qup2, NA, phase_flag1, qdss_gpio15, NA, NA,
+			NA, NA, NA, NA),
+	[30] = PINGROUP(30, EAST, qup2, phase_flag2, qdss_gpio, NA, NA, NA, NA,
+			NA, NA, NA),
+	[31] = PINGROUP(31, NORTH, qup11, qup14, NA, NA, NA, NA, NA, NA, NA,
+			NA),
+	[32] = PINGROUP(32, NORTH, qup11, qup14, NA, NA, NA, NA, NA, NA, NA,
+			NA),
+	[33] = PINGROUP(33, NORTH, qup11, qup14, NA, NA, NA, NA, NA, NA, NA,
+			NA),
+	[34] = PINGROUP(34, NORTH, qup11, qup14, NA, NA, NA, NA, NA, NA, NA,
+			NA),
+	[35] = PINGROUP(35, SOUTH, pci_e0, qup_l4, jitter_bist, NA, NA, NA, NA,
+			NA, NA, NA),
+	[36] = PINGROUP(36, SOUTH, pci_e0, qup_l5, pll_bist, NA, atest_tsens,
+			NA, NA, NA, NA, NA),
+	[37] = PINGROUP(37, SOUTH, qup_l6, agera_pll, NA, NA, NA, NA, NA, NA,
+			NA, NA),
+	[38] = PINGROUP(38, NORTH, usb_phy, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[39] = PINGROUP(39, EAST, lpass_slimbus, NA, NA, NA, NA, NA, NA, NA,
+			NA, NA),
+	[40] = PINGROUP(40, SOUTH, sd_write, tsif1_error, NA, NA, NA, NA, NA,
+			NA, NA, NA),
+	[41] = PINGROUP(41, EAST, qup3, NA, qdss_gpio6, NA, NA, NA, NA, NA, NA,
+			NA),
+	[42] = PINGROUP(42, EAST, qup3, NA, qdss_gpio7, NA, NA, NA, NA, NA, NA,
+			NA),
+	[43] = PINGROUP(43, EAST, qup3, NA, qdss_gpio14, NA, NA, NA, NA, NA,
+			NA, NA),
+	[44] = PINGROUP(44, EAST, qup3, NA, qdss_gpio15, NA, NA, NA, NA, NA,
+			NA, NA),
+	[45] = PINGROUP(45, EAST, qup6, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[46] = PINGROUP(46, EAST, qup6, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[47] = PINGROUP(47, EAST, qup6, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[48] = PINGROUP(48, EAST, qup6, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[49] = PINGROUP(49, NORTH, qup12, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[50] = PINGROUP(50, NORTH, qup12, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[51] = PINGROUP(51, NORTH, qup12, qdss_cti, NA, NA, NA, NA, NA, NA, NA,
+			NA),
+	[52] = PINGROUP(52, NORTH, qup12, phase_flag16, qdss_cti, NA, NA, NA,
+			NA, NA, NA, NA),
+	[53] = PINGROUP(53, NORTH, qup10, phase_flag11, NA, NA, NA, NA, NA, NA,
+			NA, NA),
+	[54] = PINGROUP(54, NORTH, qup10, NA, phase_flag12, NA, NA, NA, NA, NA,
+			NA, NA),
+	[55] = PINGROUP(55, NORTH, qup10, phase_flag13, NA, NA, NA, NA, NA, NA,
+			NA, NA),
+	[56] = PINGROUP(56, NORTH, qup10, phase_flag17, NA, NA, NA, NA, NA, NA,
+			NA, NA),
+	[57] = PINGROUP(57, NORTH, qua_mi2s, gcc_gp1, phase_flag18, NA, NA, NA,
+			NA, NA, NA, NA),
+	[58] = PINGROUP(58, NORTH, qua_mi2s, gcc_gp2, phase_flag19, NA, NA, NA,
+			NA, NA, NA, NA),
+	[59] = PINGROUP(59, NORTH, qua_mi2s, gcc_gp3, phase_flag20, NA, NA, NA,
+			NA, NA, NA, NA),
+	[60] = PINGROUP(60, NORTH, qua_mi2s, cri_trng0, phase_flag21, NA, NA,
+			NA, NA, NA, NA, NA),
+	[61] = PINGROUP(61, NORTH, qua_mi2s, cri_trng1, phase_flag22, NA, NA,
+			NA, NA, NA, NA, NA),
+	[62] = PINGROUP(62, NORTH, qua_mi2s, cri_trng, phase_flag23, qdss_cti,
+			NA, NA, NA, NA, NA, NA),
+	[63] = PINGROUP(63, NORTH, qua_mi2s, NA, phase_flag24, qdss_cti, NA,
+			NA, NA, NA, NA, NA),
+	[64] = PINGROUP(64, NORTH, pri_mi2s, sp_cmu, phase_flag25, NA, NA, NA,
+			NA, NA, NA, NA),
+	[65] = PINGROUP(65, NORTH, pri_mi2s, qup8, NA, NA, NA, NA, NA, NA, NA,
+			NA),
+	[66] = PINGROUP(66, NORTH, pri_mi2s_ws, qup8, NA, NA, NA, NA, NA, NA,
+			NA, NA),
+	[67] = PINGROUP(67, NORTH, pri_mi2s, qup8, NA, NA, NA, NA, NA, NA, NA,
+			NA),
+	[68] = PINGROUP(68, NORTH, pri_mi2s, qup8, NA, NA, NA, NA, NA, NA, NA,
+			NA),
+	[69] = PINGROUP(69, EAST, spkr_i2s, audio_ref, NA, NA, NA, NA, NA, NA,
+			NA, NA),
+	[70] = PINGROUP(70, EAST, lpass_slimbus, spkr_i2s, NA, NA, NA, NA, NA,
+			NA, NA, NA),
+	[71] = PINGROUP(71, EAST, lpass_slimbus, spkr_i2s, tsense_pwm1,
+			tsense_pwm2, NA, NA, NA, NA, NA, NA),
+	[72] = PINGROUP(72, EAST, lpass_slimbus, spkr_i2s, NA, NA, NA, NA, NA,
+			NA, NA, NA),
+	[73] = PINGROUP(73, EAST, btfm_slimbus, atest_usb2, NA, NA, NA, NA, NA,
+			NA, NA, NA),
+	[74] = PINGROUP(74, EAST, btfm_slimbus, ter_mi2s, phase_flag7,
+			atest_usb23, NA, NA, NA, NA, NA, NA),
+	[75] = PINGROUP(75, EAST, ter_mi2s, phase_flag8, qdss_gpio8,
+			atest_usb22, NA, NA, NA, NA, NA, NA),
+	[76] = PINGROUP(76, EAST, ter_mi2s, phase_flag9, qdss_gpio9,
+			atest_usb21, NA, NA, NA, NA, NA, NA),
+	[77] = PINGROUP(77, EAST, ter_mi2s, phase_flag4, qdss_gpio10,
+			atest_usb20, NA, NA, NA, NA, NA, NA),
+	[78] = PINGROUP(78, EAST, ter_mi2s, gcc_gp1, NA, NA, NA, NA, NA, NA,
+			NA, NA),
+	[79] = PINGROUP(79, NORTH, sec_mi2s, NA, NA, qdss_gpio11, NA, NA, NA,
+			NA, NA, NA),
+	[80] = PINGROUP(80, NORTH, sec_mi2s, NA, qdss_gpio12, NA, NA, NA, NA,
+			NA, NA, NA),
+	[81] = PINGROUP(81, NORTH, sec_mi2s, qup15, NA, NA, NA, NA, NA, NA, NA,
+			NA),
+	[82] = PINGROUP(82, NORTH, sec_mi2s, qup15, NA, NA, NA, NA, NA, NA, NA,
+			NA),
+	[83] = PINGROUP(83, NORTH, sec_mi2s, qup15, NA, NA, NA, NA, NA, NA, NA,
+			NA),
+	[84] = PINGROUP(84, NORTH, qup15, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[85] = PINGROUP(85, EAST, qup5, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[86] = PINGROUP(86, EAST, qup5, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[87] = PINGROUP(87, EAST, qup5, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[88] = PINGROUP(88, EAST, qup5, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[89] = PINGROUP(89, SOUTH, tsif1_clk, qup4, qspi_cs, tgu_ch3,
+			phase_flag10, NA, NA, NA, NA, NA),
+	[90] = PINGROUP(90, SOUTH, tsif1_en, mdp_vsync0, qup4, qspi_cs,
+			mdp_vsync1, mdp_vsync2, mdp_vsync3, tgu_ch0,
+			phase_flag0, qdss_cti),
+	[91] = PINGROUP(91, SOUTH, tsif1_data, sdc4_cmd, qup4, qspi0, tgu_ch1,
+			NA, qdss_cti, NA, NA, NA),
+	[92] = PINGROUP(92, SOUTH, tsif2_error, sdc43, qup4, qspi1, vfr_1,
+			tgu_ch2, NA, NA, NA, NA),
+	[93] = PINGROUP(93, SOUTH, tsif2_clk, sdc4_clk, qup7, qspi2, NA,
+			qdss_gpio13, NA, NA, NA, NA),
+	[94] = PINGROUP(94, SOUTH, tsif2_en, sdc42, qup7, qspi3, NA, NA, NA,
+			NA, NA, NA),
+	[95] = PINGROUP(95, SOUTH, tsif2_data, sdc41, qup7, qspi_clk, NA, NA,
+			NA, NA, NA, NA),
+	[96] = PINGROUP(96, SOUTH, tsif2_sync, sdc40, qup7, phase_flag3, NA,
+			NA, NA, NA, NA, NA),
+	[97] = PINGROUP(97, NORTH, NA, NA, mdp_vsync, ldo_en, NA, NA, NA, NA,
+			NA, NA),
+	[98] = PINGROUP(98, NORTH, NA, mdp_vsync, ldo_update, NA, NA, NA, NA,
+			NA, NA, NA),
+	[99] = PINGROUP(99, NORTH, phase_flag14, NA, NA, NA, NA, NA, NA, NA,
+			NA, NA),
+	[100] = PINGROUP(100, NORTH, phase_flag15, NA, NA, NA, NA, NA, NA, NA,
+			 NA, NA),
+	[101] = PINGROUP(101, NORTH, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[102] = PINGROUP(102, NORTH, pci_e1, prng_rosc, NA, NA, NA, NA, NA, NA,
+			 NA, NA),
+	[103] = PINGROUP(103, NORTH, pci_e1, phase_flag5, NA, NA, NA, NA, NA,
+			 NA, NA, NA),
+	[104] = PINGROUP(104, NORTH, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[105] = PINGROUP(105, NORTH, uim2_data, qup13, qup_l4, NA, NA, NA, NA,
+			 NA, NA, NA),
+	[106] = PINGROUP(106, NORTH, uim2_clk, qup13, qup_l5, NA, NA, NA, NA,
+			 NA, NA, NA),
+	[107] = PINGROUP(107, NORTH, uim2_reset, qup13, qup_l6, NA, NA, NA, NA,
+			 NA, NA, NA),
+	[108] = PINGROUP(108, NORTH, uim2_present, qup13, NA, NA, NA, NA, NA,
+			 NA, NA, NA),
+	[109] = PINGROUP(109, NORTH, uim1_data, NA, NA, NA, NA, NA, NA, NA, NA,
+			 NA),
+	[110] = PINGROUP(110, NORTH, uim1_clk, NA, NA, NA, NA, NA, NA, NA, NA,
+			 NA),
+	[111] = PINGROUP(111, NORTH, uim1_reset, NA, NA, NA, NA, NA, NA, NA,
+			 NA, NA),
+	[112] = PINGROUP(112, NORTH, uim1_present, NA, NA, NA, NA, NA, NA, NA,
+			 NA, NA),
+	[113] = PINGROUP(113, NORTH, uim_batt, edp_hot, NA, NA, NA, NA, NA, NA,
+			 NA, NA),
+	[114] = PINGROUP(114, NORTH, NA, nav_pps, nav_pps, NA, NA, NA, NA, NA,
+			 NA, NA),
+	[115] = PINGROUP(115, NORTH, NA, nav_pps, nav_pps, NA, NA, NA, NA, NA,
+			 NA, NA),
+	[116] = PINGROUP(116, NORTH, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[117] = PINGROUP(117, NORTH, NA, qdss_gpio0, atest_char, NA, NA, NA,
+			 NA, NA, NA, NA),
+	[118] = PINGROUP(118, NORTH, adsp_ext, NA, qdss_gpio1, atest_char3, NA,
+			 NA, NA, NA, NA, NA),
+	[119] = PINGROUP(119, NORTH, NA, qdss_gpio2, atest_char2, NA, NA, NA,
+			 NA, NA, NA, NA),
+	[120] = PINGROUP(120, NORTH, NA, qdss_gpio3, atest_char1, NA, NA, NA,
+			 NA, NA, NA, NA),
+	[121] = PINGROUP(121, NORTH, NA, qdss_gpio4, atest_char0, NA, NA, NA,
+			 NA, NA, NA, NA),
+	[122] = PINGROUP(122, EAST, NA, qdss_gpio5, NA, NA, NA, NA, NA, NA, NA,
+			 NA),
+	[123] = PINGROUP(123, EAST, qup_l4, NA, qdss_gpio, NA, NA, NA, NA, NA,
+			 NA, NA),
+	[124] = PINGROUP(124, EAST, qup_l5, NA, qdss_gpio, NA, NA, NA, NA, NA,
+			 NA, NA),
+	[125] = PINGROUP(125, EAST, qup_l6, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[126] = PINGROUP(126, EAST, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[127] = PINGROUP(127, NORTH, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[128] = PINGROUP(128, NORTH, nav_pps, nav_pps, NA, NA, NA, NA, NA, NA,
+			 NA, NA),
+	[129] = PINGROUP(129, NORTH, nav_pps, nav_pps, NA, NA, NA, NA, NA, NA,
+			 NA, NA),
+	[130] = PINGROUP(130, NORTH, qlink_request, NA, NA, NA, NA, NA, NA, NA,
+			 NA, NA),
+	[131] = PINGROUP(131, NORTH, qlink_enable, NA, NA, NA, NA, NA, NA, NA,
+			 NA, NA),
+	[132] = PINGROUP(132, NORTH, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[133] = PINGROUP(133, NORTH, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[134] = PINGROUP(134, NORTH, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[135] = PINGROUP(135, NORTH, NA, pa_indicator, NA, NA, NA, NA, NA, NA,
+			 NA, NA),
+	[136] = PINGROUP(136, NORTH, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[137] = PINGROUP(137, NORTH, NA, NA, phase_flag26, NA, NA, NA, NA, NA,
+			 NA, NA),
+	[138] = PINGROUP(138, NORTH, NA, NA, phase_flag27, NA, NA, NA, NA, NA,
+			 NA, NA),
+	[139] = PINGROUP(139, NORTH, NA, phase_flag28, NA, NA, NA, NA, NA, NA,
+			 NA, NA),
+	[140] = PINGROUP(140, NORTH, NA, NA, phase_flag6, NA, NA, NA, NA, NA,
+			 NA, NA),
+	[141] = PINGROUP(141, NORTH, NA, phase_flag29, NA, NA, NA, NA, NA, NA,
+			 NA, NA),
+	[142] = PINGROUP(142, NORTH, NA, phase_flag30, NA, NA, NA, NA, NA, NA,
+			 NA, NA),
+	[143] = PINGROUP(143, NORTH, NA, nav_pps, nav_pps, NA, phase_flag31,
+			 NA, NA, NA, NA, NA),
+	[144] = PINGROUP(144, NORTH, mss_lte, NA, NA, NA, NA, NA, NA, NA, NA,
+			 NA),
+	[145] = PINGROUP(145, NORTH, mss_lte, NA, NA, NA, NA, NA, NA, NA, NA,
+			 NA),
+	[146] = PINGROUP(146, NORTH, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[147] = PINGROUP(147, NORTH, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[148] = PINGROUP(148, NORTH, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[149] = PINGROUP(149, NORTH, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[150] = SDC_QDSD_PINGROUP(sdc2_clk, 0x99a000, 14, 6),
+	[151] = SDC_QDSD_PINGROUP(sdc2_cmd, 0x99a000, 11, 3),
+	[152] = SDC_QDSD_PINGROUP(sdc2_data, 0x99a000, 9, 0),
+	[153] = UFS_RESET(ufs_reset, 0x99f000),
+};
+
+static const struct msm_dir_conn sdm845_dir_conn[] = {
+	{1, 510},
+	{3, 511},
+	{5, 512},
+	{10, 513},
+	{11, 514},
+	{20, 515},
+	{22, 516},
+	{24, 517},
+	{26, 518},
+	{30, 519},
+	{31, 639},
+	{32, 521},
+	{34, 522},
+	{36, 523},
+	{37, 524},
+	{38, 525},
+	{39, 526},
+	{40, 527},
+	{41, 637},
+	{43, 529},
+	{44, 530},
+	{46, 531},
+	{48, 532},
+	{49, 640},
+	{52, 534},
+	{53, 535},
+	{54, 536},
+	{56, 537},
+	{57, 538},
+	{58, 539},
+	{59, 540},
+	{60, 541},
+	{61, 542},
+	{62, 543},
+	{63, 544},
+	{64, 545},
+	{66, 546},
+	{68, 547},
+	{71, 548},
+	{73, 549},
+	{77, 550},
+	{78, 551},
+	{79, 552},
+	{80, 553},
+	{84, 554},
+	{85, 555},
+	{86, 556},
+	{88, 557},
+	{89, 638},
+	{91, 559},
+	{92, 560},
+	{95, 561},
+	{96, 562},
+	{97, 563},
+	{101, 564},
+	{103, 565},
+	{104, 566},
+	{115, 570},
+	{116, 571},
+	{117, 572},
+	{118, 573},
+	{119, 609},
+	{120, 610},
+	{121, 611},
+	{122, 612},
+	{123, 613},
+	{124, 614},
+	{125, 615},
+	{127, 617},
+	{128, 618},
+	{129, 619},
+	{130, 620},
+	{132, 621},
+	{133, 622},
+	{145, 623},
+};
+
+static const struct msm_pinctrl_soc_data sdm845_pinctrl = {
+	.pins = sdm845_pins,
+	.npins = ARRAY_SIZE(sdm845_pins),
+	.functions = sdm845_functions,
+	.nfunctions = ARRAY_SIZE(sdm845_functions),
+	.groups = sdm845_groups,
+	.ngroups = ARRAY_SIZE(sdm845_groups),
+	.ngpios = 150,
+	.dir_conn = sdm845_dir_conn,
+	.n_dir_conns = ARRAY_SIZE(sdm845_dir_conn),
+};
+
+static int sdm845_pinctrl_probe(struct platform_device *pdev)
+{
+	return msm_pinctrl_probe(pdev, &sdm845_pinctrl);
+}
+
+static const struct of_device_id sdm845_pinctrl_of_match[] = {
+	{ .compatible = "qcom,sdm845-pinctrl-v2", },
+	{ },
+};
+
+static struct platform_driver sdm845_pinctrl_driver = {
+	.driver = {
+		.name = "sdm845-v2-pinctrl",
+		.owner = THIS_MODULE,
+		.of_match_table = sdm845_pinctrl_of_match,
+	},
+	.probe = sdm845_pinctrl_probe,
+	.remove = msm_pinctrl_remove,
+};
+
+static int __init sdm845_pinctrl_init(void)
+{
+	return platform_driver_register(&sdm845_pinctrl_driver);
+}
+arch_initcall(sdm845_pinctrl_init);
+
+static void __exit sdm845_pinctrl_exit(void)
+{
+	platform_driver_unregister(&sdm845_pinctrl_driver);
+}
+module_exit(sdm845_pinctrl_exit);
+
+MODULE_DESCRIPTION("QTI sdm845-v2 pinctrl driver");
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(of, sdm845_pinctrl_of_match);
diff --git a/drivers/pinctrl/qcom/pinctrl-sdm845.c b/drivers/pinctrl/qcom/pinctrl-sdm845.c
index 8faabb0..b7e06b6 100644
--- a/drivers/pinctrl/qcom/pinctrl-sdm845.c
+++ b/drivers/pinctrl/qcom/pinctrl-sdm845.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -25,8 +25,10 @@
 		.ngroups = ARRAY_SIZE(fname##_groups),	\
 	}
 
+#define NORTH	0x00500000
+#define SOUTH	0x00900000
 #define REG_SIZE 0x1000
-#define PINGROUP(id, f1, f2, f3, f4, f5, f6, f7, f8, f9)	\
+#define PINGROUP(id, base, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10)	\
 	{						\
 		.name = "gpio" #id,			\
 		.pins = gpio##id##_pins,		\
@@ -41,14 +43,15 @@
 			msm_mux_##f6,			\
 			msm_mux_##f7,			\
 			msm_mux_##f8,			\
-			msm_mux_##f9			\
+			msm_mux_##f9,			\
+			msm_mux_##f10			\
 		},					\
-		.nfuncs = 10,				\
-		.ctl_reg = REG_SIZE * id,		\
-		.io_reg = 0x4 + REG_SIZE * id,		\
-		.intr_cfg_reg = 0x8 + REG_SIZE * id,	\
-		.intr_status_reg = 0xc + REG_SIZE * id,	\
-		.intr_target_reg = 0x8 + REG_SIZE * id,	\
+		.nfuncs = 11,				\
+		.ctl_reg = base + REG_SIZE * id,		\
+		.io_reg = base + 0x4 + REG_SIZE * id,		\
+		.intr_cfg_reg = base + 0x8 + REG_SIZE * id,	\
+		.intr_status_reg = base + 0xc + REG_SIZE * id,	\
+		.intr_target_reg = base + 0x8 + REG_SIZE * id,	\
 		.mux_bit = 2,			\
 		.pull_bit = 0,			\
 		.drv_bit = 6,			\
@@ -114,10 +117,6 @@
 		.intr_detection_bit = -1,		\
 		.intr_detection_width = -1,		\
 	}
-
-static const u32 sdm845_tile_offsets[] = {0x500000, 0x900000, 0x100000};
-static u32 sdm845_pin_base[150];
-
 static const struct pinctrl_pin_desc sdm845_pins[] = {
 	PINCTRL_PIN(0, "GPIO_0"),
 	PINCTRL_PIN(1, "GPIO_1"),
@@ -1371,255 +1370,278 @@ static const struct msm_function sdm845_functions[] = {
  * Clients would not be able to request these dummy pin groups.
  */
 static const struct msm_pingroup sdm845_groups[] = {
-	[0] = PINGROUP(0, qup0, NA, NA, NA, NA, NA, NA, NA, NA),
-	[1] = PINGROUP(1, qup0, NA, NA, NA, NA, NA, NA, NA, NA),
-	[2] = PINGROUP(2, qup0, NA, NA, NA, NA, NA, NA, NA, NA),
-	[3] = PINGROUP(3, qup0, NA, NA, NA, NA, NA, NA, NA, NA),
-	[4] = PINGROUP(4, qup9, qdss_cti, NA, NA, NA, NA, NA, NA, NA),
-	[5] = PINGROUP(5, qup9, qdss_cti, NA, NA, NA, NA, NA, NA, NA),
-	[6] = PINGROUP(6, qup9, NA, ddr_pxi0, NA, NA, NA, NA, NA, NA),
-	[7] = PINGROUP(7, qup9, ddr_bist, NA, atest_tsens2,
-		       vsense_trigger, atest_usb1, ddr_pxi0, NA, NA),
-	[8] = PINGROUP(8, qup_l4, NA, ddr_bist, NA, NA, wlan1_adc1,
-		       atest_usb13, ddr_pxi1, NA),
-	[9] = PINGROUP(9, qup_l5, ddr_bist, NA, wlan1_adc0, atest_usb12,
-		       ddr_pxi1, NA, NA, NA),
-	[10] = PINGROUP(10, mdp_vsync, qup_l6, ddr_bist, wlan2_adc1,
-			atest_usb11, ddr_pxi2, NA, NA, NA),
-	[11] = PINGROUP(11, mdp_vsync, edp_lcd, dbg_out, wlan2_adc0,
-			atest_usb10, ddr_pxi2, NA, NA, NA),
-	[12] = PINGROUP(12, mdp_vsync, m_voc, tsif1_sync, ddr_pxi3, NA,
-			NA, NA, NA, NA),
-	[13] = PINGROUP(13, cam_mclk, pll_bypassnl, qdss_gpio0,
-			ddr_pxi3, NA, NA, NA, NA, NA),
-	[14] = PINGROUP(14, cam_mclk, pll_reset, qdss_gpio1, NA, NA, NA,
-			NA, NA, NA),
-	[15] = PINGROUP(15, cam_mclk, qdss_gpio2, NA, NA, NA, NA, NA,
-			NA, NA),
-	[16] = PINGROUP(16, cam_mclk, qdss_gpio3, NA, NA, NA, NA, NA,
-			NA, NA),
-	[17] = PINGROUP(17, cci_i2c, qup1, qdss_gpio4, NA, NA, NA, NA,
-			NA, NA),
-	[18] = PINGROUP(18, cci_i2c, qup1, NA, qdss_gpio5, NA, NA, NA,
-			NA, NA),
-	[19] = PINGROUP(19, cci_i2c, qup1, NA, qdss_gpio6, NA, NA, NA,
-			NA, NA),
-	[20] = PINGROUP(20, cci_i2c, qup1, NA, qdss_gpio7, NA, NA, NA,
-			NA, NA),
-	[21] = PINGROUP(21, cci_timer0, gcc_gp2, qdss_gpio8, NA, NA, NA,
-			NA, NA, NA),
-	[22] = PINGROUP(22, cci_timer1, gcc_gp3, qdss_gpio, NA, NA, NA,
-			NA, NA, NA),
-	[23] = PINGROUP(23, cci_timer2, qdss_gpio9, NA, NA, NA, NA, NA,
-			NA, NA),
-	[24] = PINGROUP(24, cci_timer3, cci_async, qdss_gpio10, NA, NA,
-			NA, NA, NA, NA),
-	[25] = PINGROUP(25, cci_timer4, cci_async, qdss_gpio11, NA, NA,
-			NA, NA, NA, NA),
-	[26] = PINGROUP(26, cci_async, qdss_gpio12, NA, NA, NA, NA, NA,
-			NA, NA),
-	[27] = PINGROUP(27, qup2, qdss_gpio13, NA, NA, NA, NA, NA, NA,
-			NA),
-	[28] = PINGROUP(28, qup2, qdss_gpio14, NA, NA, NA, NA, NA, NA,
-			NA),
-	[29] = PINGROUP(29, qup2, NA, phase_flag1, qdss_gpio15, NA, NA,
-			NA, NA, NA),
-	[30] = PINGROUP(30, qup2, phase_flag2, qdss_gpio, NA, NA, NA, NA,
-			NA, NA),
-	[31] = PINGROUP(31, qup11, qup14, NA, NA, NA, NA, NA, NA, NA),
-	[32] = PINGROUP(32, qup11, qup14, NA, NA, NA, NA, NA, NA, NA),
-	[33] = PINGROUP(33, qup11, qup14, NA, NA, NA, NA, NA, NA, NA),
-	[34] = PINGROUP(34, qup11, qup14, NA, NA, NA, NA, NA, NA, NA),
-	[35] = PINGROUP(35, pci_e0, qup_l4, jitter_bist, NA, NA, NA, NA,
-			NA, NA),
-	[36] = PINGROUP(36, pci_e0, qup_l5, pll_bist, NA, atest_tsens,
-			NA, NA, NA, NA),
-	[37] = PINGROUP(37, qup_l6, agera_pll, NA, NA, NA, NA, NA, NA,
-			NA),
-	[38] = PINGROUP(38, usb_phy, NA, NA, NA, NA, NA, NA, NA, NA),
-	[39] = PINGROUP(39, lpass_slimbus, NA, NA, NA, NA, NA, NA, NA,
-			NA),
-	[40] = PINGROUP(40, sd_write, tsif1_error, NA, NA, NA, NA, NA,
-			NA, NA),
-	[41] = PINGROUP(41, qup3, NA, qdss_gpio6, NA, NA, NA, NA, NA, NA),
-	[42] = PINGROUP(42, qup3, NA, qdss_gpio7, NA, NA, NA, NA, NA, NA),
-	[43] = PINGROUP(43, qup3, NA, qdss_gpio14, NA, NA, NA, NA, NA,
-			NA),
-	[44] = PINGROUP(44, qup3, NA, qdss_gpio15, NA, NA, NA, NA, NA,
-			NA),
-	[45] = PINGROUP(45, qup6, NA, NA, NA, NA, NA, NA, NA, NA),
-	[46] = PINGROUP(46, qup6, NA, NA, NA, NA, NA, NA, NA, NA),
-	[47] = PINGROUP(47, qup6, NA, NA, NA, NA, NA, NA, NA, NA),
-	[48] = PINGROUP(48, qup6, NA, NA, NA, NA, NA, NA, NA, NA),
-	[49] = PINGROUP(49, qup12, NA, NA, NA, NA, NA, NA, NA, NA),
-	[50] = PINGROUP(50, qup12, NA, NA, NA, NA, NA, NA, NA, NA),
-	[51] = PINGROUP(51, qup12, qdss_cti, NA, NA, NA, NA, NA, NA, NA),
-	[52] = PINGROUP(52, qup12, phase_flag16, qdss_cti, NA, NA, NA,
-			NA, NA, NA),
-	[53] = PINGROUP(53, qup10, phase_flag11, NA, NA, NA, NA, NA, NA,
-			NA),
-	[54] = PINGROUP(54, qup10, NA, phase_flag12, NA, NA, NA, NA, NA,
-			NA),
-	[55] = PINGROUP(55, qup10, phase_flag13, NA, NA, NA, NA, NA, NA,
-			NA),
-	[56] = PINGROUP(56, qup10, phase_flag17, NA, NA, NA, NA, NA, NA,
-			NA),
-	[57] = PINGROUP(57, qua_mi2s, gcc_gp1, phase_flag18, NA, NA, NA,
-			NA, NA, NA),
-	[58] = PINGROUP(58, qua_mi2s, gcc_gp2, phase_flag19, NA, NA, NA,
-			NA, NA, NA),
-	[59] = PINGROUP(59, qua_mi2s, gcc_gp3, phase_flag20, NA, NA, NA,
-			NA, NA, NA),
-	[60] = PINGROUP(60, qua_mi2s, cri_trng0, phase_flag21, NA, NA,
-			NA, NA, NA, NA),
-	[61] = PINGROUP(61, qua_mi2s, cri_trng1, phase_flag22, NA, NA,
-			NA, NA, NA, NA),
-	[62] = PINGROUP(62, qua_mi2s, cri_trng, phase_flag23, qdss_cti,
+	[0] = PINGROUP(0, NORTH, qup0, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[1] = PINGROUP(1, NORTH, qup0, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[2] = PINGROUP(2, NORTH, qup0, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[3] = PINGROUP(3, NORTH, qup0, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[4] = PINGROUP(4, NORTH, qup9, qdss_cti, NA, NA, NA, NA, NA, NA, NA,
+		       NA),
+	[5] = PINGROUP(5, NORTH, qup9, qdss_cti, NA, NA, NA, NA, NA, NA, NA,
+		       NA),
+	[6] = PINGROUP(6, NORTH, qup9, NA, ddr_pxi0, NA, NA, NA, NA, NA, NA,
+		       NA),
+	[7] = PINGROUP(7, NORTH, qup9, ddr_bist, NA, atest_tsens2,
+		       vsense_trigger, atest_usb1, ddr_pxi0, NA, NA, NA),
+	[8] = PINGROUP(8, NORTH, qup_l4, NA, ddr_bist, NA, NA, wlan1_adc1,
+		       atest_usb13, ddr_pxi1, NA, NA),
+	[9] = PINGROUP(9, NORTH, qup_l5, ddr_bist, NA, wlan1_adc0, atest_usb12,
+		       ddr_pxi1, NA, NA, NA, NA),
+	[10] = PINGROUP(10, NORTH, mdp_vsync, qup_l6, ddr_bist, wlan2_adc1,
+			atest_usb11, ddr_pxi2, NA, NA, NA, NA),
+	[11] = PINGROUP(11, NORTH, mdp_vsync, edp_lcd, dbg_out, wlan2_adc0,
+			atest_usb10, ddr_pxi2, NA, NA, NA, NA),
+	[12] = PINGROUP(12, SOUTH, mdp_vsync, m_voc, tsif1_sync, ddr_pxi3, NA,
 			NA, NA, NA, NA, NA),
-	[63] = PINGROUP(63, qua_mi2s, NA, phase_flag24, qdss_cti, NA,
+	[13] = PINGROUP(13, SOUTH, cam_mclk, pll_bypassnl, qdss_gpio0,
+			ddr_pxi3, NA, NA, NA, NA, NA, NA),
+	[14] = PINGROUP(14, SOUTH, cam_mclk, pll_reset, qdss_gpio1, NA, NA, NA,
 			NA, NA, NA, NA),
-	[64] = PINGROUP(64, pri_mi2s, sp_cmu, phase_flag25, NA, NA, NA,
+	[15] = PINGROUP(15, SOUTH, cam_mclk, qdss_gpio2, NA, NA, NA, NA, NA,
 			NA, NA, NA),
-	[65] = PINGROUP(65, pri_mi2s, qup8, NA, NA, NA, NA, NA, NA, NA),
-	[66] = PINGROUP(66, pri_mi2s_ws, qup8, NA, NA, NA, NA, NA, NA,
-			NA),
-	[67] = PINGROUP(67, pri_mi2s, qup8, NA, NA, NA, NA, NA, NA, NA),
-	[68] = PINGROUP(68, pri_mi2s, qup8, NA, NA, NA, NA, NA, NA, NA),
-	[69] = PINGROUP(69, spkr_i2s, audio_ref, NA, NA, NA, NA, NA, NA,
-			NA),
-	[70] = PINGROUP(70, lpass_slimbus, spkr_i2s, NA, NA, NA, NA, NA,
-			NA, NA),
-	[71] = PINGROUP(71, lpass_slimbus, spkr_i2s, tsense_pwm1,
-			tsense_pwm2, NA, NA, NA, NA, NA),
-	[72] = PINGROUP(72, lpass_slimbus, spkr_i2s, NA, NA, NA, NA, NA,
-			NA, NA),
-	[73] = PINGROUP(73, btfm_slimbus, atest_usb2, NA, NA, NA, NA, NA,
-			NA, NA),
-	[74] = PINGROUP(74, btfm_slimbus, ter_mi2s, phase_flag7,
-			atest_usb23, NA, NA, NA, NA, NA),
-	[75] = PINGROUP(75, ter_mi2s, phase_flag8, qdss_gpio8,
-			atest_usb22, NA, NA, NA, NA, NA),
-	[76] = PINGROUP(76, ter_mi2s, phase_flag9, qdss_gpio9,
-			atest_usb21, NA, NA, NA, NA, NA),
-	[77] = PINGROUP(77, ter_mi2s, phase_flag4, qdss_gpio10,
-			atest_usb20, NA, NA, NA, NA, NA),
-	[78] = PINGROUP(78, ter_mi2s, gcc_gp1, NA, NA, NA, NA, NA, NA,
-			NA),
-	[79] = PINGROUP(79, sec_mi2s, NA, NA, qdss_gpio11, NA, NA, NA,
-			NA, NA),
-	[80] = PINGROUP(80, sec_mi2s, NA, qdss_gpio12, NA, NA, NA, NA,
-			NA, NA),
-	[81] = PINGROUP(81, sec_mi2s, qup15, NA, NA, NA, NA, NA, NA, NA),
-	[82] = PINGROUP(82, sec_mi2s, qup15, NA, NA, NA, NA, NA, NA, NA),
-	[83] = PINGROUP(83, sec_mi2s, qup15, NA, NA, NA, NA, NA, NA, NA),
-	[84] = PINGROUP(84, qup15, NA, NA, NA, NA, NA, NA, NA, NA),
-	[85] = PINGROUP(85, qup5, NA, NA, NA, NA, NA, NA, NA, NA),
-	[86] = PINGROUP(86, qup5, NA, NA, NA, NA, NA, NA, NA, NA),
-	[87] = PINGROUP(87, qup5, NA, NA, NA, NA, NA, NA, NA, NA),
-	[88] = PINGROUP(88, qup5, NA, NA, NA, NA, NA, NA, NA, NA),
-	[89] = PINGROUP(89, tsif1_clk, qup4, tgu_ch3, phase_flag10, NA,
+	[16] = PINGROUP(16, SOUTH, cam_mclk, qdss_gpio3, NA, NA, NA, NA, NA,
+			NA, NA, NA),
+	[17] = PINGROUP(17, SOUTH, cci_i2c, qup1, qdss_gpio4, NA, NA, NA, NA,
+			NA, NA, NA),
+	[18] = PINGROUP(18, SOUTH, cci_i2c, qup1, NA, qdss_gpio5, NA, NA, NA,
+			NA, NA, NA),
+	[19] = PINGROUP(19, SOUTH, cci_i2c, qup1, NA, qdss_gpio6, NA, NA, NA,
+			NA, NA, NA),
+	[20] = PINGROUP(20, SOUTH, cci_i2c, qup1, NA, qdss_gpio7, NA, NA, NA,
+			NA, NA, NA),
+	[21] = PINGROUP(21, SOUTH, cci_timer0, gcc_gp2, qdss_gpio8, NA, NA, NA,
 			NA, NA, NA, NA),
-	[90] = PINGROUP(90, tsif1_en, mdp_vsync0, qup4, mdp_vsync1,
-			mdp_vsync2, mdp_vsync3, tgu_ch0, phase_flag0, qdss_cti),
-	[91] = PINGROUP(91, tsif1_data, sdc4_cmd, qup4, tgu_ch1, NA,
-			qdss_cti, NA, NA, NA),
-	[92] = PINGROUP(92, tsif2_error, sdc43, qup4, vfr_1, tgu_ch2,
+	[22] = PINGROUP(22, SOUTH, cci_timer1, gcc_gp3, qdss_gpio, NA, NA, NA,
 			NA, NA, NA, NA),
-	[93] = PINGROUP(93, tsif2_clk, sdc4_clk, qup7, NA, qdss_gpio13,
-			NA, NA, NA, NA),
-	[94] = PINGROUP(94, tsif2_en, sdc42, qup7, NA, NA, NA, NA, NA,
-			NA),
-	[95] = PINGROUP(95, tsif2_data, sdc41, qup7, NA, NA, NA, NA, NA,
-			NA),
-	[96] = PINGROUP(96, tsif2_sync, sdc40, qup7, phase_flag3, NA,
-			NA, NA, NA, NA),
-	[97] = PINGROUP(97, NA, NA, mdp_vsync, ldo_en, NA, NA, NA, NA,
-			NA),
-	[98] = PINGROUP(98, NA, mdp_vsync, ldo_update, NA, NA, NA, NA,
+	[23] = PINGROUP(23, SOUTH, cci_timer2, qdss_gpio9, NA, NA, NA, NA, NA,
+			NA, NA, NA),
+	[24] = PINGROUP(24, SOUTH, cci_timer3, cci_async, qdss_gpio10, NA, NA,
+			NA, NA, NA, NA, NA),
+	[25] = PINGROUP(25, SOUTH, cci_timer4, cci_async, qdss_gpio11, NA, NA,
+			NA, NA, NA, NA, NA),
+	[26] = PINGROUP(26, SOUTH, cci_async, qdss_gpio12, NA, NA, NA, NA, NA,
+			NA, NA, NA),
+	[27] = PINGROUP(27, NORTH, qup2, qdss_gpio13, NA, NA, NA, NA, NA, NA,
 			NA, NA),
-	[99] = PINGROUP(99, phase_flag14, NA, NA, NA, NA, NA, NA, NA,
+	[28] = PINGROUP(28, NORTH, qup2, qdss_gpio14, NA, NA, NA, NA, NA, NA,
+			NA, NA),
+	[29] = PINGROUP(29, NORTH, qup2, NA, phase_flag1, qdss_gpio15, NA, NA,
+			NA, NA, NA, NA),
+	[30] = PINGROUP(30, NORTH, qup2, phase_flag2, qdss_gpio, NA, NA, NA,
+			NA, NA, NA, NA),
+	[31] = PINGROUP(31, NORTH, qup11, qup14, NA, NA, NA, NA, NA, NA, NA,
 			NA),
-	[100] = PINGROUP(100, phase_flag15, NA, NA, NA, NA, NA, NA, NA,
-			 NA),
-	[101] = PINGROUP(101, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	[102] = PINGROUP(102, pci_e1, prng_rosc, NA, NA, NA, NA, NA, NA,
-			 NA),
-	[103] = PINGROUP(103, pci_e1, phase_flag5, NA, NA, NA, NA, NA,
+	[32] = PINGROUP(32, NORTH, qup11, qup14, NA, NA, NA, NA, NA, NA, NA,
+			NA),
+	[33] = PINGROUP(33, NORTH, qup11, qup14, NA, NA, NA, NA, NA, NA, NA,
+			NA),
+	[34] = PINGROUP(34, NORTH, qup11, qup14, NA, NA, NA, NA, NA, NA, NA,
+			NA),
+	[35] = PINGROUP(35, SOUTH, pci_e0, qup_l4, jitter_bist, NA, NA, NA, NA,
+			NA, NA, NA),
+	[36] = PINGROUP(36, SOUTH, pci_e0, qup_l5, pll_bist, NA, atest_tsens,
+			NA, NA, NA, NA, NA),
+	[37] = PINGROUP(37, SOUTH, qup_l6, agera_pll, NA, NA, NA, NA, NA, NA,
+			NA, NA),
+	[38] = PINGROUP(38, NORTH, usb_phy, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[39] = PINGROUP(39, NORTH, lpass_slimbus, NA, NA, NA, NA, NA, NA, NA,
+			NA, NA),
+	[40] = PINGROUP(40, SOUTH, sd_write, tsif1_error, NA, NA, NA, NA, NA,
+			NA, NA, NA),
+	[41] = PINGROUP(41, SOUTH, qup3, NA, qdss_gpio6, NA, NA, NA, NA, NA,
+			NA, NA),
+	[42] = PINGROUP(42, SOUTH, qup3, NA, qdss_gpio7, NA, NA, NA, NA, NA,
+			NA, NA),
+	[43] = PINGROUP(43, SOUTH, qup3, NA, qdss_gpio14, NA, NA, NA, NA, NA,
+			NA, NA),
+	[44] = PINGROUP(44, SOUTH, qup3, NA, qdss_gpio15, NA, NA, NA, NA, NA,
+			NA, NA),
+	[45] = PINGROUP(45, NORTH, qup6, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[46] = PINGROUP(46, NORTH, qup6, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[47] = PINGROUP(47, NORTH, qup6, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[48] = PINGROUP(48, NORTH, qup6, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[49] = PINGROUP(49, NORTH, qup12, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[50] = PINGROUP(50, NORTH, qup12, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[51] = PINGROUP(51, NORTH, qup12, qdss_cti, NA, NA, NA, NA, NA, NA, NA,
+			NA),
+	[52] = PINGROUP(52, NORTH, qup12, phase_flag16, qdss_cti, NA, NA, NA,
+			NA, NA, NA, NA),
+	[53] = PINGROUP(53, NORTH, qup10, phase_flag11, NA, NA, NA, NA, NA, NA,
+			NA, NA),
+	[54] = PINGROUP(54, NORTH, qup10, NA, phase_flag12, NA, NA, NA, NA, NA,
+			NA, NA),
+	[55] = PINGROUP(55, NORTH, qup10, phase_flag13, NA, NA, NA, NA, NA, NA,
+			NA, NA),
+	[56] = PINGROUP(56, NORTH, qup10, phase_flag17, NA, NA, NA, NA, NA, NA,
+			NA, NA),
+	[57] = PINGROUP(57, NORTH, qua_mi2s, gcc_gp1, phase_flag18, NA, NA, NA,
+			NA, NA, NA, NA),
+	[58] = PINGROUP(58, NORTH, qua_mi2s, gcc_gp2, phase_flag19, NA, NA, NA,
+			NA, NA, NA, NA),
+	[59] = PINGROUP(59, NORTH, qua_mi2s, gcc_gp3, phase_flag20, NA, NA, NA,
+			NA, NA, NA, NA),
+	[60] = PINGROUP(60, NORTH, qua_mi2s, cri_trng0, phase_flag21, NA, NA,
+			NA, NA, NA, NA, NA),
+	[61] = PINGROUP(61, NORTH, qua_mi2s, cri_trng1, phase_flag22, NA, NA,
+			NA, NA, NA, NA, NA),
+	[62] = PINGROUP(62, NORTH, qua_mi2s, cri_trng, phase_flag23, qdss_cti,
+			NA, NA, NA, NA, NA, NA),
+	[63] = PINGROUP(63, NORTH, qua_mi2s, NA, phase_flag24, qdss_cti, NA,
+			NA, NA, NA, NA, NA),
+	[64] = PINGROUP(64, NORTH, pri_mi2s, sp_cmu, phase_flag25, NA, NA, NA,
+			NA, NA, NA, NA),
+	[65] = PINGROUP(65, NORTH, pri_mi2s, qup8, NA, NA, NA, NA, NA, NA, NA,
+			NA),
+	[66] = PINGROUP(66, NORTH, pri_mi2s_ws, qup8, NA, NA, NA, NA, NA, NA,
+			NA, NA),
+	[67] = PINGROUP(67, NORTH, pri_mi2s, qup8, NA, NA, NA, NA, NA, NA, NA,
+			NA),
+	[68] = PINGROUP(68, NORTH, pri_mi2s, qup8, NA, NA, NA, NA, NA, NA, NA,
+			NA),
+	[69] = PINGROUP(69, NORTH, spkr_i2s, audio_ref, NA, NA, NA, NA, NA, NA,
+			NA, NA),
+	[70] = PINGROUP(70, NORTH, lpass_slimbus, spkr_i2s, NA, NA, NA, NA, NA,
+			NA, NA, NA),
+	[71] = PINGROUP(71, NORTH, lpass_slimbus, spkr_i2s, tsense_pwm1,
+			tsense_pwm2, NA, NA, NA, NA, NA, NA),
+	[72] = PINGROUP(72, NORTH, lpass_slimbus, spkr_i2s, NA, NA, NA, NA, NA,
+			NA, NA, NA),
+	[73] = PINGROUP(73, NORTH, btfm_slimbus, atest_usb2, NA, NA, NA, NA,
+			NA, NA, NA, NA),
+	[74] = PINGROUP(74, NORTH, btfm_slimbus, ter_mi2s, phase_flag7,
+			atest_usb23, NA, NA, NA, NA, NA, NA),
+	[75] = PINGROUP(75, NORTH, ter_mi2s, phase_flag8, qdss_gpio8,
+			atest_usb22, NA, NA, NA, NA, NA, NA),
+	[76] = PINGROUP(76, NORTH, ter_mi2s, phase_flag9, qdss_gpio9,
+			atest_usb21, NA, NA, NA, NA, NA, NA),
+	[77] = PINGROUP(77, NORTH, ter_mi2s, phase_flag4, qdss_gpio10,
+			atest_usb20, NA, NA, NA, NA, NA, NA),
+	[78] = PINGROUP(78, NORTH, ter_mi2s, gcc_gp1, NA, NA, NA, NA, NA, NA,
+			NA, NA),
+	[79] = PINGROUP(79, NORTH, sec_mi2s, NA, NA, qdss_gpio11, NA, NA, NA,
+			NA, NA, NA),
+	[80] = PINGROUP(80, NORTH, sec_mi2s, NA, qdss_gpio12, NA, NA, NA, NA,
+			NA, NA, NA),
+	[81] = PINGROUP(81, NORTH, sec_mi2s, qup15, NA, NA, NA, NA, NA, NA, NA,
+			NA),
+	[82] = PINGROUP(82, NORTH, sec_mi2s, qup15, NA, NA, NA, NA, NA, NA, NA,
+			NA),
+	[83] = PINGROUP(83, NORTH, sec_mi2s, qup15, NA, NA, NA, NA, NA, NA, NA,
+			NA),
+	[84] = PINGROUP(84, NORTH, qup15, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[85] = PINGROUP(85, SOUTH, qup5, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[86] = PINGROUP(86, SOUTH, qup5, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[87] = PINGROUP(87, SOUTH, qup5, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[88] = PINGROUP(88, SOUTH, qup5, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[89] = PINGROUP(89, SOUTH, tsif1_clk, qup4, tgu_ch3, phase_flag10, NA,
+			NA, NA, NA, NA, NA),
+	[90] = PINGROUP(90, SOUTH, tsif1_en, mdp_vsync0, qup4, mdp_vsync1,
+			mdp_vsync2, mdp_vsync3, tgu_ch0, phase_flag0, qdss_cti,
+			NA),
+	[91] = PINGROUP(91, SOUTH, tsif1_data, sdc4_cmd, qup4, tgu_ch1, NA,
+			qdss_cti, NA, NA, NA, NA),
+	[92] = PINGROUP(92, SOUTH, tsif2_error, sdc43, qup4, vfr_1, tgu_ch2,
+			NA, NA, NA, NA, NA),
+	[93] = PINGROUP(93, SOUTH, tsif2_clk, sdc4_clk, qup7, NA, qdss_gpio13,
+			NA, NA, NA, NA, NA),
+	[94] = PINGROUP(94, SOUTH, tsif2_en, sdc42, qup7, NA, NA, NA, NA, NA,
+			NA, NA),
+	[95] = PINGROUP(95, SOUTH, tsif2_data, sdc41, qup7, NA, NA, NA, NA, NA,
+			NA, NA),
+	[96] = PINGROUP(96, SOUTH, tsif2_sync, sdc40, qup7, phase_flag3, NA,
+			NA, NA, NA, NA, NA),
+	[97] = PINGROUP(97, NORTH, NA, NA, mdp_vsync, ldo_en, NA, NA, NA, NA,
+			NA, NA),
+	[98] = PINGROUP(98, NORTH, NA, mdp_vsync, ldo_update, NA, NA, NA, NA,
+			NA, NA, NA),
+	[99] = PINGROUP(99, NORTH, phase_flag14, NA, NA, NA, NA, NA, NA, NA,
+			NA, NA),
+	[100] = PINGROUP(100, NORTH, phase_flag15, NA, NA, NA, NA, NA, NA, NA,
 			 NA, NA),
-	[104] = PINGROUP(104, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	[105] = PINGROUP(105, uim2_data, qup13, qup_l4, NA, NA, NA, NA,
+	[101] = PINGROUP(101, NORTH, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[102] = PINGROUP(102, NORTH, pci_e1, prng_rosc, NA, NA, NA, NA, NA, NA,
 			 NA, NA),
-	[106] = PINGROUP(106, uim2_clk, qup13, qup_l5, NA, NA, NA, NA,
-			 NA, NA),
-	[107] = PINGROUP(107, uim2_reset, qup13, qup_l6, NA, NA, NA, NA,
-			 NA, NA),
-	[108] = PINGROUP(108, uim2_present, qup13, NA, NA, NA, NA, NA,
-			 NA, NA),
-	[109] = PINGROUP(109, uim1_data, NA, NA, NA, NA, NA, NA, NA, NA),
-	[110] = PINGROUP(110, uim1_clk, NA, NA, NA, NA, NA, NA, NA, NA),
-	[111] = PINGROUP(111, uim1_reset, NA, NA, NA, NA, NA, NA, NA,
-			 NA),
-	[112] = PINGROUP(112, uim1_present, NA, NA, NA, NA, NA, NA, NA,
-			 NA),
-	[113] = PINGROUP(113, uim_batt, edp_hot, NA, NA, NA, NA, NA, NA,
-			 NA),
-	[114] = PINGROUP(114, NA, nav_pps, nav_pps, NA, NA, NA, NA, NA,
-			 NA),
-	[115] = PINGROUP(115, NA, nav_pps, nav_pps, NA, NA, NA, NA, NA,
-			 NA),
-	[116] = PINGROUP(116, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	[117] = PINGROUP(117, NA, qdss_gpio0, atest_char, NA, NA, NA,
+	[103] = PINGROUP(103, NORTH, pci_e1, phase_flag5, NA, NA, NA, NA, NA,
 			 NA, NA, NA),
-	[118] = PINGROUP(118, adsp_ext, NA, qdss_gpio1, atest_char3, NA,
+	[104] = PINGROUP(104, NORTH, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[105] = PINGROUP(105, NORTH, uim2_data, qup13, qup_l4, NA, NA, NA, NA,
+			 NA, NA, NA),
+	[106] = PINGROUP(106, NORTH, uim2_clk, qup13, qup_l5, NA, NA, NA, NA,
+			 NA, NA, NA),
+	[107] = PINGROUP(107, NORTH, uim2_reset, qup13, qup_l6, NA, NA, NA, NA,
+			 NA, NA, NA),
+	[108] = PINGROUP(108, NORTH, uim2_present, qup13, NA, NA, NA, NA, NA,
+			 NA, NA, NA),
+	[109] = PINGROUP(109, NORTH, uim1_data, NA, NA, NA, NA, NA, NA, NA, NA,
+			 NA),
+	[110] = PINGROUP(110, NORTH, uim1_clk, NA, NA, NA, NA, NA, NA, NA, NA,
+			 NA),
+	[111] = PINGROUP(111, NORTH, uim1_reset, NA, NA, NA, NA, NA, NA, NA,
+			 NA, NA),
+	[112] = PINGROUP(112, NORTH, uim1_present, NA, NA, NA, NA, NA, NA, NA,
+			 NA, NA),
+	[113] = PINGROUP(113, NORTH, uim_batt, edp_hot, NA, NA, NA, NA, NA, NA,
+			 NA, NA),
+	[114] = PINGROUP(114, NORTH, NA, nav_pps, nav_pps, NA, NA, NA, NA, NA,
+			 NA, NA),
+	[115] = PINGROUP(115, NORTH, NA, nav_pps, nav_pps, NA, NA, NA, NA, NA,
+			 NA, NA),
+	[116] = PINGROUP(116, NORTH, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[117] = PINGROUP(117, NORTH, NA, qdss_gpio0, atest_char, NA, NA, NA,
 			 NA, NA, NA, NA),
-	[119] = PINGROUP(119, NA, qdss_gpio2, atest_char2, NA, NA, NA,
-			 NA, NA, NA),
-	[120] = PINGROUP(120, NA, qdss_gpio3, atest_char1, NA, NA, NA,
-			 NA, NA, NA),
-	[121] = PINGROUP(121, NA, qdss_gpio4, atest_char0, NA, NA, NA,
-			 NA, NA, NA),
-	[122] = PINGROUP(122, NA, qdss_gpio5, NA, NA, NA, NA, NA, NA, NA),
-	[123] = PINGROUP(123, qup_l4, NA, qdss_gpio, NA, NA, NA, NA, NA,
-			 NA),
-	[124] = PINGROUP(124, qup_l5, NA, qdss_gpio, NA, NA, NA, NA, NA,
-			 NA),
-	[125] = PINGROUP(125, qup_l6, NA, NA, NA, NA, NA, NA, NA, NA),
-	[126] = PINGROUP(126, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	[127] = PINGROUP(127, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	[128] = PINGROUP(128, nav_pps, nav_pps, NA, NA, NA, NA, NA, NA,
-			 NA),
-	[129] = PINGROUP(129, nav_pps, nav_pps, NA, NA, NA, NA, NA, NA,
-			 NA),
-	[130] = PINGROUP(130, qlink_request, NA, NA, NA, NA, NA, NA, NA,
-			 NA),
-	[131] = PINGROUP(131, qlink_enable, NA, NA, NA, NA, NA, NA, NA,
-			 NA),
-	[132] = PINGROUP(132, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	[133] = PINGROUP(133, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	[134] = PINGROUP(134, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	[135] = PINGROUP(135, NA, pa_indicator, NA, NA, NA, NA, NA, NA,
-			 NA),
-	[136] = PINGROUP(136, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	[137] = PINGROUP(137, NA, NA, phase_flag26, NA, NA, NA, NA, NA,
-			 NA),
-	[138] = PINGROUP(138, NA, NA, phase_flag27, NA, NA, NA, NA, NA,
-			 NA),
-	[139] = PINGROUP(139, NA, phase_flag28, NA, NA, NA, NA, NA, NA,
-			 NA),
-	[140] = PINGROUP(140, NA, NA, phase_flag6, NA, NA, NA, NA, NA,
-			 NA),
-	[141] = PINGROUP(141, NA, phase_flag29, NA, NA, NA, NA, NA, NA,
-			 NA),
-	[142] = PINGROUP(142, NA, phase_flag30, NA, NA, NA, NA, NA, NA,
-			 NA),
-	[143] = PINGROUP(143, NA, nav_pps, nav_pps, NA, phase_flag31,
+	[118] = PINGROUP(118, NORTH, adsp_ext, NA, qdss_gpio1, atest_char3, NA,
+			 NA, NA, NA, NA, NA),
+	[119] = PINGROUP(119, NORTH, NA, qdss_gpio2, atest_char2, NA, NA, NA,
 			 NA, NA, NA, NA),
-	[144] = PINGROUP(144, mss_lte, NA, NA, NA, NA, NA, NA, NA, NA),
-	[145] = PINGROUP(145, mss_lte, NA, NA, NA, NA, NA, NA, NA, NA),
-	[146] = PINGROUP(146, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	[147] = PINGROUP(147, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	[148] = PINGROUP(148, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	[149] = PINGROUP(149, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[120] = PINGROUP(120, NORTH, NA, qdss_gpio3, atest_char1, NA, NA, NA,
+			 NA, NA, NA, NA),
+	[121] = PINGROUP(121, NORTH, NA, qdss_gpio4, atest_char0, NA, NA, NA,
+			 NA, NA, NA, NA),
+	[122] = PINGROUP(122, NORTH, NA, qdss_gpio5, NA, NA, NA, NA, NA, NA,
+			 NA, NA),
+	[123] = PINGROUP(123, NORTH, qup_l4, NA, qdss_gpio, NA, NA, NA, NA, NA,
+			 NA, NA),
+	[124] = PINGROUP(124, NORTH, qup_l5, NA, qdss_gpio, NA, NA, NA, NA, NA,
+			 NA, NA),
+	[125] = PINGROUP(125, NORTH, qup_l6, NA, NA, NA, NA, NA, NA, NA, NA,
+			 NA),
+	[126] = PINGROUP(126, NORTH, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[127] = PINGROUP(127, NORTH, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[128] = PINGROUP(128, NORTH, nav_pps, nav_pps, NA, NA, NA, NA, NA, NA,
+			 NA, NA),
+	[129] = PINGROUP(129, NORTH, nav_pps, nav_pps, NA, NA, NA, NA, NA, NA,
+			 NA, NA),
+	[130] = PINGROUP(130, NORTH, qlink_request, NA, NA, NA, NA, NA, NA, NA,
+			 NA, NA),
+	[131] = PINGROUP(131, NORTH, qlink_enable, NA, NA, NA, NA, NA, NA, NA,
+			 NA, NA),
+	[132] = PINGROUP(132, NORTH, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[133] = PINGROUP(133, NORTH, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[134] = PINGROUP(134, NORTH, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[135] = PINGROUP(135, NORTH, NA, pa_indicator, NA, NA, NA, NA, NA, NA,
+			 NA, NA),
+	[136] = PINGROUP(136, NORTH, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[137] = PINGROUP(137, NORTH, NA, NA, phase_flag26, NA, NA, NA, NA, NA,
+			 NA, NA),
+	[138] = PINGROUP(138, NORTH, NA, NA, phase_flag27, NA, NA, NA, NA, NA,
+			 NA, NA),
+	[139] = PINGROUP(139, NORTH, NA, phase_flag28, NA, NA, NA, NA, NA, NA,
+			 NA, NA),
+	[140] = PINGROUP(140, NORTH, NA, NA, phase_flag6, NA, NA, NA, NA, NA,
+			 NA, NA),
+	[141] = PINGROUP(141, NORTH, NA, phase_flag29, NA, NA, NA, NA, NA, NA,
+			 NA, NA),
+	[142] = PINGROUP(142, NORTH, NA, phase_flag30, NA, NA, NA, NA, NA, NA,
+			 NA, NA),
+	[143] = PINGROUP(143, NORTH, NA, nav_pps, nav_pps, NA, phase_flag31,
+			 NA, NA, NA, NA, NA),
+	[144] = PINGROUP(144, NORTH, mss_lte, NA, NA, NA, NA, NA, NA, NA, NA,
+			 NA),
+	[145] = PINGROUP(145, NORTH, mss_lte, NA, NA, NA, NA, NA, NA, NA, NA,
+			 NA),
+	[146] = PINGROUP(146, NORTH, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[147] = PINGROUP(147, NORTH, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[148] = PINGROUP(148, NORTH, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[149] = PINGROUP(149, NORTH, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
 	[150] = SDC_QDSD_PINGROUP(sdc2_clk, 0x99a000, 14, 6),
 	[151] = SDC_QDSD_PINGROUP(sdc2_cmd, 0x99a000, 11, 3),
 	[152] = SDC_QDSD_PINGROUP(sdc2_data, 0x99a000, 9, 0),
@@ -1714,10 +1736,6 @@ static const struct msm_pinctrl_soc_data sdm845_pinctrl = {
 	.ngpios = 150,
 	.dir_conn = sdm845_dir_conn,
 	.n_dir_conns = ARRAY_SIZE(sdm845_dir_conn),
-	.tile_offsets = sdm845_tile_offsets,
-	.n_tile_offsets = ARRAY_SIZE(sdm845_tile_offsets),
-	.pin_base = sdm845_pin_base,
-	.reg_size = REG_SIZE,
 };
 
 static int sdm845_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/qcom/pinctrl-sdxpoorwills.c b/drivers/pinctrl/qcom/pinctrl-sdxpoorwills.c
index 4a21eb6..6ceb39a 100644
--- a/drivers/pinctrl/qcom/pinctrl-sdxpoorwills.c
+++ b/drivers/pinctrl/qcom/pinctrl-sdxpoorwills.c
@@ -85,6 +85,32 @@
 		.intr_enable_bit = -1,			\
 		.intr_status_bit = -1,			\
 		.intr_target_bit = -1,			\
+		.intr_raw_status_bit = -1,		\
+		.intr_polarity_bit = -1,		\
+		.intr_detection_bit = -1,		\
+		.intr_detection_width = -1,		\
+	}
+
+#define UFS_RESET(pg_name, offset)				\
+	{					        \
+		.name = #pg_name,			\
+		.pins = pg_name##_pins,			\
+		.npins = (unsigned int)ARRAY_SIZE(pg_name##_pins),	\
+		.ctl_reg = offset,			\
+		.io_reg = offset + 0x4,			\
+		.intr_cfg_reg = 0,			\
+		.intr_status_reg = 0,			\
+		.intr_target_reg = 0,			\
+		.mux_bit = -1,				\
+		.pull_bit = 3,				\
+		.drv_bit = 0,				\
+		.oe_bit = -1,				\
+		.in_bit = -1,				\
+		.out_bit = 0,				\
+		.intr_enable_bit = -1,			\
+		.intr_status_bit = -1,			\
+		.intr_target_bit = -1,			\
+		.intr_raw_status_bit = -1,		\
 		.intr_polarity_bit = -1,		\
 		.intr_detection_bit = -1,		\
 		.intr_detection_width = -1,		\
@@ -190,12 +216,13 @@ static const struct pinctrl_pin_desc sdxpoorwills_pins[] = {
 	PINCTRL_PIN(97, "GPIO_97"),
 	PINCTRL_PIN(98, "GPIO_98"),
 	PINCTRL_PIN(99, "GPIO_99"),
-	PINCTRL_PIN(100, "SDC1_CLK"),
-	PINCTRL_PIN(101, "SDC1_CMD"),
-	PINCTRL_PIN(102, "SDC1_DATA"),
-	PINCTRL_PIN(103, "SDC2_CLK"),
-	PINCTRL_PIN(104, "SDC2_CMD"),
-	PINCTRL_PIN(105, "SDC2_DATA"),
+	PINCTRL_PIN(100, "SDC1_RCLK"),
+	PINCTRL_PIN(101, "SDC1_CLK"),
+	PINCTRL_PIN(102, "SDC1_CMD"),
+	PINCTRL_PIN(103, "SDC1_DATA"),
+	PINCTRL_PIN(104, "SDC2_CLK"),
+	PINCTRL_PIN(105, "SDC2_CMD"),
+	PINCTRL_PIN(106, "SDC2_DATA"),
 };
 
 #define DECLARE_MSM_GPIO_PINS(pin) \
@@ -301,21 +328,22 @@ DECLARE_MSM_GPIO_PINS(97);
 DECLARE_MSM_GPIO_PINS(98);
 DECLARE_MSM_GPIO_PINS(99);
 
-static const unsigned int sdc1_clk_pins[] = { 100 };
-static const unsigned int sdc1_cmd_pins[] = { 101 };
-static const unsigned int sdc1_data_pins[] = { 102 };
-static const unsigned int sdc2_clk_pins[] = { 103 };
-static const unsigned int sdc2_cmd_pins[] = { 104 };
-static const unsigned int sdc2_data_pins[] = { 105 };
+static const unsigned int sdc1_rclk_pins[] = { 100 };
+static const unsigned int sdc1_clk_pins[] = { 101 };
+static const unsigned int sdc1_cmd_pins[] = { 102 };
+static const unsigned int sdc1_data_pins[] = { 103 };
+static const unsigned int sdc2_clk_pins[] = { 104 };
+static const unsigned int sdc2_cmd_pins[] = { 105 };
+static const unsigned int sdc2_data_pins[] = { 106 };
 
 enum sdxpoorwills_functions {
-	msm_mux_qdss_stm31,
-	msm_mux_blsp_uart1,
-	msm_mux_gpio,
 	msm_mux_uim2_data,
+	msm_mux_gpio,
+	msm_mux_qdss_stm31,
 	msm_mux_ebi0_wrcdc,
 	msm_mux_uim2_present,
 	msm_mux_qdss_stm30,
+	msm_mux_blsp_uart1,
 	msm_mux_uim2_reset,
 	msm_mux_blsp_i2c1,
 	msm_mux_qdss_stm29,
@@ -340,14 +368,22 @@ enum sdxpoorwills_functions {
 	msm_mux_blsp_i2c3,
 	msm_mux_gcc_gp3,
 	msm_mux_qdss_stm19,
-	msm_mux_qdss12,
+	msm_mux_qdss4,
 	msm_mux_qdss_stm18,
-	msm_mux_qdss13,
+	msm_mux_qdss5,
 	msm_mux_qdss_stm17,
-	msm_mux_qdss14,
+	msm_mux_qdss6,
 	msm_mux_bimc_dte0,
 	msm_mux_native_tsens,
-	msm_mux_vsense_trigger,
+	msm_mux_qdss_stm16,
+	msm_mux_qdss7,
+	msm_mux_bimc_dte1,
+	msm_mux_sec_mi2s,
+	msm_mux_blsp_spi4,
+	msm_mux_blsp_uart4,
+	msm_mux_qdss_cti,
+	msm_mux_qdss_stm27,
+	msm_mux_qdss8,
 	msm_mux_qdss_stm26,
 	msm_mux_qdss9,
 	msm_mux_blsp_i2c4,
@@ -358,26 +394,19 @@ enum sdxpoorwills_functions {
 	msm_mux_gcc_gp2,
 	msm_mux_qdss_stm24,
 	msm_mux_qdss11,
-	msm_mux_qdss_stm16,
-	msm_mux_qdss15,
-	msm_mux_bimc_dte1,
-	msm_mux_sec_mi2s,
-	msm_mux_blsp_spi4,
-	msm_mux_blsp_uart4,
-	msm_mux_qdss_cti,
-	msm_mux_qdss_stm27,
-	msm_mux_qdss8,
 	msm_mux_ebi2_a,
-	msm_mux_qdss_stm3,
 	msm_mux_ebi2_lcd,
-	msm_mux_qdss_stm2,
 	msm_mux_pll_bist,
-	msm_mux_qdss_stm1,
-	msm_mux_qdss_stm0,
 	msm_mux_adsp_ext,
-	msm_mux_epm1,
+	msm_mux_qdss_stm11,
 	msm_mux_m_voc,
+	msm_mux_qdss_stm10,
 	msm_mux_native_char,
+	msm_mux_native_char3,
+	msm_mux_nav_pps,
+	msm_mux_nav_dr,
+	msm_mux_native_char2,
+	msm_mux_native_tsense,
 	msm_mux_native_char1,
 	msm_mux_pa_indicator,
 	msm_mux_qdss_traceclk,
@@ -386,92 +415,69 @@ enum sdxpoorwills_functions {
 	msm_mux_qlink_req,
 	msm_mux_pll_test,
 	msm_mux_cri_trng,
-	msm_mux_wmss_reset,
-	msm_mux_native_char3,
-	msm_mux_nav_pps,
-	msm_mux_nav_dr,
-	msm_mux_native_char2,
-	msm_mux_native_tsense,
 	msm_mux_prng_rosc,
 	msm_mux_cri_trng0,
 	msm_mux_cri_trng1,
 	msm_mux_pll_ref,
 	msm_mux_coex_uart,
-	msm_mux_qdss_stm11,
-	msm_mux_qdss_stm10,
-	msm_mux_ddr_pxi0,
-	msm_mux_ap2mdm_status,
+	msm_mux_qdss_tracectl,
 	msm_mux_ddr_bist,
-	msm_mux_mdm2ap_status,
-	msm_mux_ap2mdm_err,
-	msm_mux_mdm2ap_err,
-	msm_mux_ap2mdm_vdd,
-	msm_mux_mdm2ap_vdd,
-	msm_mux_ap2mdm_wake,
-	msm_mux_pciehost_rst,
 	msm_mux_blsp_spi1,
-	msm_mux_qdss_stm14,
-	msm_mux_pcie_wake,
-	msm_mux_mdm2ap_wake,
 	msm_mux_pci_e,
+	msm_mux_tgu_ch0,
+	msm_mux_pcie_clkreq,
+	msm_mux_qdss_stm15,
+	msm_mux_qdss_stm14,
 	msm_mux_qdss_stm13,
+	msm_mux_mgpi_clk,
+	msm_mux_qdss_stm12,
+	msm_mux_qdss_stm9,
 	msm_mux_i2s_mclk,
 	msm_mux_audio_ref,
 	msm_mux_ldo_update,
 	msm_mux_qdss_stm8,
 	msm_mux_qdss_stm7,
-	msm_mux_qdss4,
-	msm_mux_tgu_ch0,
-	msm_mux_pcie_clkreq,
-	msm_mux_qdss_stm9,
-	msm_mux_qdss_stm15,
-	msm_mux_mgpi_clk,
-	msm_mux_qdss_stm12,
-	msm_mux_qdss_tracectl,
-	msm_mux_atest_char,
+	msm_mux_qdss12,
 	msm_mux_qdss_stm6,
-	msm_mux_qdss5,
-	msm_mux_atest_char3,
+	msm_mux_qdss13,
 	msm_mux_qdss_stm5,
-	msm_mux_qdss6,
-	msm_mux_atest_char2,
+	msm_mux_qdss14,
 	msm_mux_qdss_stm4,
-	msm_mux_qdss7,
-	msm_mux_atest_char1,
+	msm_mux_qdss15,
 	msm_mux_uim1_data,
-	msm_mux_atest_char0,
+	msm_mux_qdss_stm3,
 	msm_mux_uim1_present,
+	msm_mux_qdss_stm2,
 	msm_mux_uim1_reset,
+	msm_mux_qdss_stm1,
 	msm_mux_uim1_clk,
+	msm_mux_qdss_stm0,
 	msm_mux_dbg_out,
 	msm_mux_gcc_plltest,
-	msm_mux_usb2phy_ac,
 	msm_mux_NA,
 };
 
-static const char * const qdss_stm31_groups[] = {
+static const char * const uim2_data_groups[] = {
 	"gpio0",
 };
-static const char * const blsp_uart1_groups[] = {
-	"gpio0", "gpio1", "gpio2", "gpio3", "gpio20", "gpio21", "gpio22",
-	"gpio23",
-};
 static const char * const gpio_groups[] = {
 	"gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7",
 	"gpio8", "gpio9", "gpio10", "gpio11", "gpio12", "gpio13", "gpio14",
 	"gpio15", "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21",
-	"gpio22", "gpio23", "gpio24", "gpio26", "gpio27", "gpio28", "gpio29",
-	"gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35", "gpio36",
-	"gpio37", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42", "gpio43",
-	"gpio44", "gpio45", "gpio54", "gpio55", "gpio56", "gpio58", "gpio59",
-	"gpio60", "gpio61", "gpio62", "gpio63", "gpio64", "gpio65", "gpio66",
-	"gpio67", "gpio68", "gpio69", "gpio70", "gpio71", "gpio72", "gpio73",
-	"gpio74", "gpio75", "gpio76", "gpio77", "gpio78", "gpio79", "gpio80",
-	"gpio81", "gpio82", "gpio83", "gpio84", "gpio85", "gpio86", "gpio87",
-	"gpio88", "gpio89", "gpio90", "gpio91", "gpio92", "gpio93", "gpio94",
-	"gpio95", "gpio96", "gpio97", "gpio98", "gpio99",
+	"gpio22", "gpio23", "gpio24", "gpio25", "gpio26", "gpio27", "gpio28",
+	"gpio29", "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35",
+	"gpio36", "gpio37", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42",
+	"gpio43", "gpio44", "gpio45", "gpio46", "gpio47", "gpio48", "gpio49",
+	"gpio50", "gpio51", "gpio52", "gpio52", "gpio53", "gpio53", "gpio54",
+	"gpio55", "gpio56", "gpio57", "gpio58", "gpio59", "gpio60", "gpio61",
+	"gpio62", "gpio63", "gpio64", "gpio65", "gpio66", "gpio67", "gpio68",
+	"gpio69", "gpio70", "gpio71", "gpio72", "gpio73", "gpio74", "gpio75",
+	"gpio76", "gpio77", "gpio78", "gpio79", "gpio80", "gpio81", "gpio82",
+	"gpio83", "gpio84", "gpio85", "gpio86", "gpio87", "gpio88", "gpio89",
+	"gpio90", "gpio91", "gpio92", "gpio93", "gpio94", "gpio95", "gpio96",
+	"gpio97", "gpio98", "gpio99",
 };
-static const char * const uim2_data_groups[] = {
+static const char * const qdss_stm31_groups[] = {
 	"gpio0",
 };
 static const char * const ebi0_wrcdc_groups[] = {
@@ -483,6 +489,10 @@ static const char * const uim2_present_groups[] = {
 static const char * const qdss_stm30_groups[] = {
 	"gpio1",
 };
+static const char * const blsp_uart1_groups[] = {
+	"gpio0", "gpio1", "gpio2", "gpio3", "gpio20", "gpio21", "gpio22",
+	"gpio23",
+};
 static const char * const uim2_reset_groups[] = {
 	"gpio2",
 };
@@ -557,19 +567,19 @@ static const char * const gcc_gp3_groups[] = {
 static const char * const qdss_stm19_groups[] = {
 	"gpio12",
 };
-static const char * const qdss12_groups[] = {
+static const char * const qdss4_groups[] = {
 	"gpio12",
 };
 static const char * const qdss_stm18_groups[] = {
 	"gpio13",
 };
-static const char * const qdss13_groups[] = {
+static const char * const qdss5_groups[] = {
 	"gpio13",
 };
 static const char * const qdss_stm17_groups[] = {
 	"gpio14",
 };
-static const char * const qdss14_groups[] = {
+static const char * const qdss6_groups[] = {
 	"gpio14",
 };
 static const char * const bimc_dte0_groups[] = {
@@ -578,8 +588,36 @@ static const char * const bimc_dte0_groups[] = {
 static const char * const native_tsens_groups[] = {
 	"gpio14",
 };
-static const char * const vsense_trigger_groups[] = {
-	"gpio14",
+static const char * const qdss_stm16_groups[] = {
+	"gpio15",
+};
+static const char * const qdss7_groups[] = {
+	"gpio15",
+};
+static const char * const bimc_dte1_groups[] = {
+	"gpio15", "gpio60",
+};
+static const char * const sec_mi2s_groups[] = {
+	"gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21", "gpio22",
+	"gpio23",
+};
+static const char * const blsp_spi4_groups[] = {
+	"gpio16", "gpio17", "gpio18", "gpio19", "gpio52", "gpio62", "gpio71",
+};
+static const char * const blsp_uart4_groups[] = {
+	"gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21", "gpio22",
+	"gpio23",
+};
+static const char * const qdss_cti_groups[] = {
+	"gpio16", "gpio16", "gpio17", "gpio17", "gpio22", "gpio22", "gpio23",
+	"gpio23", "gpio54", "gpio54", "gpio55", "gpio55", "gpio59", "gpio61",
+	"gpio88", "gpio88", "gpio89", "gpio89",
+};
+static const char * const qdss_stm27_groups[] = {
+	"gpio16",
+};
+static const char * const qdss8_groups[] = {
+	"gpio16",
 };
 static const char * const qdss_stm26_groups[] = {
 	"gpio17",
@@ -611,70 +649,45 @@ static const char * const qdss_stm24_groups[] = {
 static const char * const qdss11_groups[] = {
 	"gpio19",
 };
-static const char * const qdss_stm16_groups[] = {
-	"gpio15",
-};
-static const char * const qdss15_groups[] = {
-	"gpio15",
-};
-static const char * const bimc_dte1_groups[] = {
-	"gpio15", "gpio60",
-};
-static const char * const sec_mi2s_groups[] = {
-	"gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21", "gpio22",
-	"gpio23",
-};
-static const char * const blsp_spi4_groups[] = {
-	"gpio16", "gpio17", "gpio18", "gpio19", "gpio52", "gpio62", "gpio71",
-};
-static const char * const blsp_uart4_groups[] = {
-	"gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21", "gpio22",
-	"gpio23",
-};
-static const char * const qdss_cti_groups[] = {
-	"gpio16", "gpio16", "gpio17", "gpio17", "gpio22", "gpio22", "gpio23",
-	"gpio23", "gpio54", "gpio54", "gpio55", "gpio55", "gpio59", "gpio61",
-	"gpio88", "gpio88", "gpio89", "gpio89",
-};
-static const char * const qdss_stm27_groups[] = {
-	"gpio16",
-};
-static const char * const qdss8_groups[] = {
-	"gpio16",
-};
 static const char * const ebi2_a_groups[] = {
 	"gpio20",
 };
-static const char * const qdss_stm3_groups[] = {
-	"gpio20",
-};
 static const char * const ebi2_lcd_groups[] = {
 	"gpio21", "gpio22", "gpio23",
 };
-static const char * const qdss_stm2_groups[] = {
-	"gpio21",
-};
 static const char * const pll_bist_groups[] = {
 	"gpio22",
 };
-static const char * const qdss_stm1_groups[] = {
-	"gpio22",
-};
-static const char * const qdss_stm0_groups[] = {
-	"gpio23",
-};
 static const char * const adsp_ext_groups[] = {
 	"gpio24", "gpio25",
 };
-static const char * const epm1_groups[] = {
-	"gpio25",
+static const char * const qdss_stm11_groups[] = {
+	"gpio24",
 };
 static const char * const m_voc_groups[] = {
 	"gpio25", "gpio46", "gpio59", "gpio61",
 };
+static const char * const qdss_stm10_groups[] = {
+	"gpio25",
+};
 static const char * const native_char_groups[] = {
 	"gpio26",
 };
+static const char * const native_char3_groups[] = {
+	"gpio28",
+};
+static const char * const nav_pps_groups[] = {
+	"gpio29", "gpio42", "gpio62",
+};
+static const char * const nav_dr_groups[] = {
+	"gpio29", "gpio42", "gpio62",
+};
+static const char * const native_char2_groups[] = {
+	"gpio29",
+};
+static const char * const native_tsense_groups[] = {
+	"gpio29",
+};
 static const char * const native_char1_groups[] = {
 	"gpio32",
 };
@@ -699,24 +712,6 @@ static const char * const pll_test_groups[] = {
 static const char * const cri_trng_groups[] = {
 	"gpio36",
 };
-static const char * const wmss_reset_groups[] = {
-	"gpio28",
-};
-static const char * const native_char3_groups[] = {
-	"gpio28",
-};
-static const char * const nav_pps_groups[] = {
-	"gpio29", "gpio42", "gpio62",
-};
-static const char * const nav_dr_groups[] = {
-	"gpio29", "gpio42", "gpio62",
-};
-static const char * const native_char2_groups[] = {
-	"gpio29",
-};
-static const char * const native_tsense_groups[] = {
-	"gpio29",
-};
 static const char * const prng_rosc_groups[] = {
 	"gpio38",
 };
@@ -732,59 +727,41 @@ static const char * const pll_ref_groups[] = {
 static const char * const coex_uart_groups[] = {
 	"gpio44", "gpio45",
 };
-static const char * const qdss_stm11_groups[] = {
+static const char * const qdss_tracectl_groups[] = {
 	"gpio44",
 };
-static const char * const qdss_stm10_groups[] = {
-	"gpio45",
-};
-static const char * const ddr_pxi0_groups[] = {
-	"gpio45", "gpio46",
-};
-static const char * const ap2mdm_status_groups[] = {
-	"gpio46",
-};
 static const char * const ddr_bist_groups[] = {
 	"gpio46", "gpio47", "gpio48", "gpio49",
 };
-static const char * const mdm2ap_status_groups[] = {
-	"gpio47",
-};
-static const char * const ap2mdm_err_groups[] = {
-	"gpio48",
-};
-static const char * const mdm2ap_err_groups[] = {
-	"gpio49",
-};
-static const char * const ap2mdm_vdd_groups[] = {
-	"gpio50",
-};
-static const char * const mdm2ap_vdd_groups[] = {
-	"gpio51",
-};
-static const char * const ap2mdm_wake_groups[] = {
-	"gpio52",
-};
-static const char * const pciehost_rst_groups[] = {
-	"gpio52",
-};
 static const char * const blsp_spi1_groups[] = {
 	"gpio52", "gpio62", "gpio71", "gpio72", "gpio73", "gpio74", "gpio75",
 };
-static const char * const qdss_stm14_groups[] = {
-	"gpio52",
-};
-static const char * const pcie_wake_groups[] = {
-	"gpio53",
-};
-static const char * const mdm2ap_wake_groups[] = {
-	"gpio53",
-};
 static const char * const pci_e_groups[] = {
-	"gpio53", "gpio57",
+	"gpio53",
+};
+static const char * const tgu_ch0_groups[] = {
+	"gpio55",
+};
+static const char * const pcie_clkreq_groups[] = {
+	"gpio56",
+};
+static const char * const qdss_stm15_groups[] = {
+	"gpio57",
+};
+static const char * const qdss_stm14_groups[] = {
+	"gpio58",
 };
 static const char * const qdss_stm13_groups[] = {
-	"gpio53",
+	"gpio59",
+};
+static const char * const mgpi_clk_groups[] = {
+	"gpio60", "gpio71",
+};
+static const char * const qdss_stm12_groups[] = {
+	"gpio60",
+};
+static const char * const qdss_stm9_groups[] = {
+	"gpio61",
 };
 static const char * const i2s_mclk_groups[] = {
 	"gpio62",
@@ -801,93 +778,66 @@ static const char * const qdss_stm8_groups[] = {
 static const char * const qdss_stm7_groups[] = {
 	"gpio63",
 };
-static const char * const qdss4_groups[] = {
-	"gpio63",
-};
-static const char * const tgu_ch0_groups[] = {
-	"gpio55",
-};
-static const char * const pcie_clkreq_groups[] = {
-	"gpio56",
-};
-static const char * const qdss_stm9_groups[] = {
-	"gpio56",
-};
-static const char * const qdss_stm15_groups[] = {
-	"gpio57",
-};
-static const char * const mgpi_clk_groups[] = {
-	"gpio60", "gpio71",
-};
-static const char * const qdss_stm12_groups[] = {
-	"gpio60",
-};
-static const char * const qdss_tracectl_groups[] = {
-	"gpio60",
-};
-static const char * const atest_char_groups[] = {
+static const char * const qdss12_groups[] = {
 	"gpio63",
 };
 static const char * const qdss_stm6_groups[] = {
 	"gpio64",
 };
-static const char * const qdss5_groups[] = {
-	"gpio64",
-};
-static const char * const atest_char3_groups[] = {
+static const char * const qdss13_groups[] = {
 	"gpio64",
 };
 static const char * const qdss_stm5_groups[] = {
 	"gpio65",
 };
-static const char * const qdss6_groups[] = {
-	"gpio65",
-};
-static const char * const atest_char2_groups[] = {
+static const char * const qdss14_groups[] = {
 	"gpio65",
 };
 static const char * const qdss_stm4_groups[] = {
 	"gpio66",
 };
-static const char * const qdss7_groups[] = {
-	"gpio66",
-};
-static const char * const atest_char1_groups[] = {
+static const char * const qdss15_groups[] = {
 	"gpio66",
 };
 static const char * const uim1_data_groups[] = {
 	"gpio67",
 };
-static const char * const atest_char0_groups[] = {
+static const char * const qdss_stm3_groups[] = {
 	"gpio67",
 };
 static const char * const uim1_present_groups[] = {
 	"gpio68",
 };
+static const char * const qdss_stm2_groups[] = {
+	"gpio68",
+};
 static const char * const uim1_reset_groups[] = {
 	"gpio69",
 };
+static const char * const qdss_stm1_groups[] = {
+	"gpio69",
+};
 static const char * const uim1_clk_groups[] = {
 	"gpio70",
 };
+static const char * const qdss_stm0_groups[] = {
+	"gpio70",
+};
 static const char * const dbg_out_groups[] = {
 	"gpio71",
 };
 static const char * const gcc_plltest_groups[] = {
 	"gpio73", "gpio74",
 };
-static const char * const usb2phy_ac_groups[] = {
-	"gpio87",
-};
 
 static const struct msm_function sdxpoorwills_functions[] = {
-	FUNCTION(qdss_stm31),
-	FUNCTION(blsp_uart1),
-	FUNCTION(gpio),
 	FUNCTION(uim2_data),
+	FUNCTION(gpio),
+	FUNCTION(qdss_stm31),
 	FUNCTION(ebi0_wrcdc),
 	FUNCTION(uim2_present),
 	FUNCTION(qdss_stm30),
+	FUNCTION(blsp_uart1),
 	FUNCTION(uim2_reset),
 	FUNCTION(blsp_i2c1),
 	FUNCTION(qdss_stm29),
@@ -912,14 +862,22 @@ static const struct msm_function sdxpoorwills_functions[] = {
 	FUNCTION(blsp_i2c3),
 	FUNCTION(gcc_gp3),
 	FUNCTION(qdss_stm19),
-	FUNCTION(qdss12),
+	FUNCTION(qdss4),
 	FUNCTION(qdss_stm18),
-	FUNCTION(qdss13),
+	FUNCTION(qdss5),
 	FUNCTION(qdss_stm17),
-	FUNCTION(qdss14),
+	FUNCTION(qdss6),
 	FUNCTION(bimc_dte0),
 	FUNCTION(native_tsens),
-	FUNCTION(vsense_trigger),
+	FUNCTION(qdss_stm16),
+	FUNCTION(qdss7),
+	FUNCTION(bimc_dte1),
+	FUNCTION(sec_mi2s),
+	FUNCTION(blsp_spi4),
+	FUNCTION(blsp_uart4),
+	FUNCTION(qdss_cti),
+	FUNCTION(qdss_stm27),
+	FUNCTION(qdss8),
 	FUNCTION(qdss_stm26),
 	FUNCTION(qdss9),
 	FUNCTION(blsp_i2c4),
@@ -930,26 +888,19 @@ static const struct msm_function sdxpoorwills_functions[] = {
 	FUNCTION(gcc_gp2),
 	FUNCTION(qdss_stm24),
 	FUNCTION(qdss11),
-	FUNCTION(qdss_stm16),
-	FUNCTION(qdss15),
-	FUNCTION(bimc_dte1),
-	FUNCTION(sec_mi2s),
-	FUNCTION(blsp_spi4),
-	FUNCTION(blsp_uart4),
-	FUNCTION(qdss_cti),
-	FUNCTION(qdss_stm27),
-	FUNCTION(qdss8),
 	FUNCTION(ebi2_a),
-	FUNCTION(qdss_stm3),
 	FUNCTION(ebi2_lcd),
-	FUNCTION(qdss_stm2),
 	FUNCTION(pll_bist),
-	FUNCTION(qdss_stm1),
-	FUNCTION(qdss_stm0),
 	FUNCTION(adsp_ext),
-	FUNCTION(epm1),
+	FUNCTION(qdss_stm11),
 	FUNCTION(m_voc),
+	FUNCTION(qdss_stm10),
 	FUNCTION(native_char),
+	FUNCTION(native_char3),
+	FUNCTION(nav_pps),
+	FUNCTION(nav_dr),
+	FUNCTION(native_char2),
+	FUNCTION(native_tsense),
 	FUNCTION(native_char1),
 	FUNCTION(pa_indicator),
 	FUNCTION(qdss_traceclk),
@@ -958,204 +909,200 @@ static const struct msm_function sdxpoorwills_functions[] = {
 	FUNCTION(qlink_req),
 	FUNCTION(pll_test),
 	FUNCTION(cri_trng),
-	FUNCTION(wmss_reset),
-	FUNCTION(native_char3),
-	FUNCTION(nav_pps),
-	FUNCTION(nav_dr),
-	FUNCTION(native_char2),
-	FUNCTION(native_tsense),
 	FUNCTION(prng_rosc),
 	FUNCTION(cri_trng0),
 	FUNCTION(cri_trng1),
 	FUNCTION(pll_ref),
 	FUNCTION(coex_uart),
-	FUNCTION(qdss_stm11),
-	FUNCTION(qdss_stm10),
-	FUNCTION(ddr_pxi0),
-	FUNCTION(ap2mdm_status),
+	FUNCTION(qdss_tracectl),
 	FUNCTION(ddr_bist),
-	FUNCTION(mdm2ap_status),
-	FUNCTION(ap2mdm_err),
-	FUNCTION(mdm2ap_err),
-	FUNCTION(ap2mdm_vdd),
-	FUNCTION(mdm2ap_vdd),
-	FUNCTION(ap2mdm_wake),
-	FUNCTION(pciehost_rst),
 	FUNCTION(blsp_spi1),
-	FUNCTION(qdss_stm14),
-	FUNCTION(pcie_wake),
-	FUNCTION(mdm2ap_wake),
 	FUNCTION(pci_e),
+	FUNCTION(tgu_ch0),
+	FUNCTION(pcie_clkreq),
+	FUNCTION(qdss_stm15),
+	FUNCTION(qdss_stm14),
 	FUNCTION(qdss_stm13),
+	FUNCTION(mgpi_clk),
+	FUNCTION(qdss_stm12),
+	FUNCTION(qdss_stm9),
 	FUNCTION(i2s_mclk),
 	FUNCTION(audio_ref),
 	FUNCTION(ldo_update),
 	FUNCTION(qdss_stm8),
 	FUNCTION(qdss_stm7),
-	FUNCTION(qdss4),
-	FUNCTION(tgu_ch0),
-	FUNCTION(pcie_clkreq),
-	FUNCTION(qdss_stm9),
-	FUNCTION(qdss_stm15),
-	FUNCTION(mgpi_clk),
-	FUNCTION(qdss_stm12),
-	FUNCTION(qdss_tracectl),
-	FUNCTION(atest_char),
+	FUNCTION(qdss12),
 	FUNCTION(qdss_stm6),
-	FUNCTION(qdss5),
-	FUNCTION(atest_char3),
+	FUNCTION(qdss13),
 	FUNCTION(qdss_stm5),
-	FUNCTION(qdss6),
-	FUNCTION(atest_char2),
+	FUNCTION(qdss14),
 	FUNCTION(qdss_stm4),
-	FUNCTION(qdss7),
-	FUNCTION(atest_char1),
+	FUNCTION(qdss15),
 	FUNCTION(uim1_data),
-	FUNCTION(atest_char0),
+	FUNCTION(qdss_stm3),
 	FUNCTION(uim1_present),
+	FUNCTION(qdss_stm2),
 	FUNCTION(uim1_reset),
+	FUNCTION(qdss_stm1),
 	FUNCTION(uim1_clk),
+	FUNCTION(qdss_stm0),
 	FUNCTION(dbg_out),
 	FUNCTION(gcc_plltest),
-	FUNCTION(usb2phy_ac),
 };
 
+/* Every pin is maintained as a single group, and missing or non-existing pin
+ * would be maintained as dummy group to synchronize pin group index with
+ * pin descriptor registered with pinctrl core.
+ * Clients would not be able to request these dummy pin groups.
+ */
 static const struct msm_pingroup sdxpoorwills_groups[] = {
-	PINGROUP(0, uim2_data, blsp_uart1, qdss_stm31, ebi0_wrcdc, NA, NA, NA,
-		 NA, NA),
-	PINGROUP(1, uim2_present, blsp_uart1, qdss_stm30, NA, NA, NA, NA, NA,
-		 NA),
-	PINGROUP(2, uim2_reset, blsp_uart1, blsp_i2c1, qdss_stm29, ebi0_wrcdc,
-		 NA, NA, NA, NA),
-	PINGROUP(3, uim2_clk, blsp_uart1, blsp_i2c1, qdss_stm28, NA, NA, NA,
-		 NA, NA),
-	PINGROUP(4, blsp_spi2, blsp_uart2, NA, qdss_stm23, qdss3, NA, NA, NA,
-		 NA),
-	PINGROUP(5, blsp_spi2, blsp_uart2, NA, qdss_stm22, qdss2, NA, NA, NA,
-		 NA),
-	PINGROUP(6, blsp_spi2, blsp_uart2, blsp_i2c2, NA, qdss_stm21, qdss1,
-		 NA, NA, NA),
-	PINGROUP(7, blsp_spi2, blsp_uart2, blsp_i2c2, NA, qdss_stm20, qdss0,
-		 NA, NA, NA),
-	PINGROUP(8, pri_mi2s, blsp_spi3, blsp_uart3, ext_dbg, ldo_en, NA, NA,
-		 NA, NA),
-	PINGROUP(9, pri_mi2s, blsp_spi3, blsp_uart3, ext_dbg, NA, NA, NA, NA,
-		 NA),
-	PINGROUP(10, pri_mi2s, blsp_spi3, blsp_uart3, blsp_i2c3, ext_dbg, NA,
-		 NA, NA, NA),
-	PINGROUP(11, pri_mi2s, blsp_spi3, blsp_uart3, blsp_i2c3, ext_dbg,
-		 gcc_gp3, NA, NA, NA),
-	PINGROUP(12, pri_mi2s, NA, qdss_stm19, qdss12, NA, NA, NA, NA, NA),
-	PINGROUP(13, pri_mi2s, NA, qdss_stm18, qdss13, NA, NA, NA, NA, NA),
-	PINGROUP(14, pri_mi2s, NA, NA, qdss_stm17, qdss14, bimc_dte0,
-		 native_tsens, vsense_trigger, NA),
-	PINGROUP(15, pri_mi2s, NA, NA, qdss_stm16, qdss15, NA, NA, bimc_dte1,
-		 NA),
-	PINGROUP(16, sec_mi2s, blsp_spi4, blsp_uart4, qdss_cti, qdss_cti, NA,
-		 NA, qdss_stm27, qdss8),
-	PINGROUP(17, sec_mi2s, blsp_spi4, blsp_uart4, qdss_cti, qdss_cti, NA,
-		 qdss_stm26, qdss9, NA),
-	PINGROUP(18, sec_mi2s, blsp_spi4, blsp_uart4, blsp_i2c4, gcc_gp1, NA,
-		 qdss_stm25, qdss10, NA),
-	PINGROUP(19, sec_mi2s, blsp_spi4, blsp_uart4, blsp_i2c4, jitter_bist,
-		 gcc_gp2, NA, qdss_stm24, qdss11),
-	PINGROUP(20, sec_mi2s, ebi2_a, blsp_uart1, blsp_uart4, NA, qdss_stm3,
-		 NA, NA, NA),
-	PINGROUP(21, sec_mi2s, ebi2_lcd, blsp_uart1, blsp_uart4, NA, NA,
-		 qdss_stm2, NA, NA),
-	PINGROUP(22, sec_mi2s, ebi2_lcd, blsp_uart1, qdss_cti, qdss_cti,
-		 blsp_uart4, pll_bist, NA, qdss_stm1),
-	PINGROUP(23, sec_mi2s, ebi2_lcd, qdss_cti, qdss_cti, blsp_uart1,
-		 blsp_uart4, NA, qdss_stm0, NA),
-	PINGROUP(24, adsp_ext, NA, NA, NA, NA, NA, NA, NA, NA),
-	PINGROUP(25, m_voc, adsp_ext, NA, NA, NA, NA, NA, NA, NA),
-	PINGROUP(26, NA, NA, NA, native_char, NA, NA, NA, NA, NA),
-	PINGROUP(27, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	PINGROUP(28, wmss_reset, native_char3, NA, NA, NA, NA, NA, NA, NA),
-	PINGROUP(29, NA, NA, nav_pps, nav_dr, NA, native_char2, native_tsense,
-		 NA, NA),
-	PINGROUP(30, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	PINGROUP(31, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	PINGROUP(32, NA, native_char1, NA, NA, NA, NA, NA, NA, NA),
-	PINGROUP(33, NA, pa_indicator, qdss_traceclk, native_char0, NA, NA, NA,
-		 NA, NA),
-	PINGROUP(34, qlink_en, NA, NA, NA, NA, NA, NA, NA, NA),
-	PINGROUP(35, qlink_req, pll_test, NA, NA, NA, NA, NA, NA, NA),
-	PINGROUP(36, NA, NA, cri_trng, NA, NA, NA, NA, NA, NA),
-	PINGROUP(37, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	PINGROUP(38, NA, NA, prng_rosc, NA, NA, NA, NA, NA, NA),
-	PINGROUP(39, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	PINGROUP(40, NA, NA, cri_trng0, NA, NA, NA, NA, NA, NA),
-	PINGROUP(41, NA, NA, cri_trng1, NA, NA, NA, NA, NA, NA),
-	PINGROUP(42, nav_pps, NA, nav_dr, pll_ref, NA, NA, NA, NA, NA),
-	PINGROUP(43, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	PINGROUP(44, coex_uart, NA, qdss_stm11, NA, NA, NA, NA, NA, NA),
-	PINGROUP(45, coex_uart, NA, qdss_stm10, ddr_pxi0, NA, NA, NA, NA, NA),
-	PINGROUP(46, m_voc, ddr_bist, ddr_pxi0, NA, NA, NA, NA, NA, NA),
-	PINGROUP(47, ddr_bist, NA, NA, NA, NA, NA, NA, NA, NA),
-	PINGROUP(48, ddr_bist, NA, NA, NA, NA, NA, NA, NA, NA),
-	PINGROUP(49, ddr_bist, NA, NA, NA, NA, NA, NA, NA, NA),
-	PINGROUP(50, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	PINGROUP(51, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	PINGROUP(52, blsp_spi2, blsp_spi1, blsp_spi3, blsp_spi4, NA, NA,
-		 qdss_stm14, NA, NA),
-	PINGROUP(53, pci_e, NA, NA, qdss_stm13, NA, NA, NA, NA, NA),
-	PINGROUP(54, qdss_cti, qdss_cti, NA, NA, NA, NA, NA, NA, NA),
-	PINGROUP(55, qdss_cti, qdss_cti, tgu_ch0, NA, NA, NA, NA, NA, NA),
-	PINGROUP(56, pcie_clkreq, NA, qdss_stm9, NA, NA, NA, NA, NA, NA),
-	PINGROUP(57, NA, qdss_stm15, NA, NA, NA, NA, NA, NA, NA),
-	PINGROUP(58, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	PINGROUP(59, qdss_cti, m_voc, bimc_dte0, NA, NA, NA, NA, NA, NA),
-	PINGROUP(60, mgpi_clk, NA, qdss_stm12, qdss_tracectl, bimc_dte1, NA,
-		 NA, NA, NA),
-	PINGROUP(61, qdss_cti, NA, m_voc, NA, NA, NA, NA, NA, NA),
-	PINGROUP(62, i2s_mclk, nav_pps, nav_dr, audio_ref, blsp_spi1,
-		 blsp_spi2, blsp_spi3, blsp_spi4, ldo_update),
-	PINGROUP(63, blsp_uart2, NA, qdss_stm7, qdss4, atest_char, NA, NA, NA,
-		 NA),
-	PINGROUP(64, blsp_uart2, NA, qdss_stm6, qdss5, atest_char3, NA, NA, NA,
-		 NA),
-	PINGROUP(65, blsp_uart2, blsp_i2c2, NA, qdss_stm5, qdss6, atest_char2,
-		 NA, NA, NA),
-	PINGROUP(66, blsp_uart2, blsp_i2c2, NA, qdss_stm4, qdss7, atest_char1,
-		 NA, NA, NA),
-	PINGROUP(67, uim1_data, atest_char0, NA, NA, NA, NA, NA, NA, NA),
-	PINGROUP(68, uim1_present, NA, NA, NA, NA, NA, NA, NA, NA),
-	PINGROUP(69, uim1_reset, NA, NA, NA, NA, NA, NA, NA, NA),
-	PINGROUP(70, uim1_clk, NA, NA, NA, NA, NA, NA, NA, NA),
-	PINGROUP(71, mgpi_clk, blsp_spi1, blsp_spi2, blsp_spi3, blsp_spi4,
-		 dbg_out, NA, NA, NA),
-	PINGROUP(72, NA, blsp_spi1, NA, NA, NA, NA, NA, NA, NA),
-	PINGROUP(73, NA, blsp_spi1, NA, gcc_plltest, NA, NA, NA, NA, NA),
-	PINGROUP(74, NA, blsp_spi1, NA, blsp_i2c1, gcc_plltest, NA, NA, NA, NA),
-	PINGROUP(75, NA, blsp_spi1, NA, blsp_i2c1, NA, NA, NA, NA, NA),
-	PINGROUP(76, blsp_i2c4, NA, NA, NA, NA, NA, NA, NA, NA),
-	PINGROUP(77, blsp_i2c4, NA, NA, NA, NA, NA, NA, NA, NA),
-	PINGROUP(78, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	PINGROUP(79, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	PINGROUP(80, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	PINGROUP(81, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	PINGROUP(82, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	PINGROUP(83, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	PINGROUP(84, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	PINGROUP(85, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	PINGROUP(86, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	PINGROUP(87, NA, NA, usb2phy_ac, NA, NA, NA, NA, NA, NA),
-	PINGROUP(88, qdss_cti, qdss_cti, NA, NA, NA, NA, NA, NA, NA),
-	PINGROUP(89, qdss_cti, qdss_cti, NA, NA, NA, NA, NA, NA, NA),
-	PINGROUP(90, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	PINGROUP(91, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	PINGROUP(92, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	PINGROUP(93, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	PINGROUP(94, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	PINGROUP(95, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	PINGROUP(96, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	PINGROUP(97, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	PINGROUP(98, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	PINGROUP(99, NA, NA, NA, NA, NA, NA, NA, NA, NA),
-	SDC_QDSD_PINGROUP(sdc1_clk, 0x9a000, 13, 6),
-	SDC_QDSD_PINGROUP(sdc1_cmd, 0x9a000, 11, 3),
-	SDC_QDSD_PINGROUP(sdc1_data, 0x9a000, 9, 0),
+	[0] = PINGROUP(0, uim2_data, blsp_uart1, qdss_stm31, ebi0_wrcdc, NA,
+		       NA, NA, NA, NA),
+	[1] = PINGROUP(1, uim2_present, blsp_uart1, qdss_stm30, NA, NA, NA, NA,
+		       NA, NA),
+	[2] = PINGROUP(2, uim2_reset, blsp_uart1, blsp_i2c1, qdss_stm29,
+		       ebi0_wrcdc, NA, NA, NA, NA),
+	[3] = PINGROUP(3, uim2_clk, blsp_uart1, blsp_i2c1, qdss_stm28, NA, NA,
+		       NA, NA, NA),
+	[4] = PINGROUP(4, blsp_spi2, blsp_uart2, NA, qdss_stm23, qdss3, NA, NA,
+		       NA, NA),
+	[5] = PINGROUP(5, blsp_spi2, blsp_uart2, NA, qdss_stm22, qdss2, NA, NA,
+		       NA, NA),
+	[6] = PINGROUP(6, blsp_spi2, blsp_uart2, blsp_i2c2, NA, qdss_stm21,
+		       qdss1, NA, NA, NA),
+	[7] = PINGROUP(7, blsp_spi2, blsp_uart2, blsp_i2c2, NA, qdss_stm20,
+		       qdss0, NA, NA, NA),
+	[8] = PINGROUP(8, pri_mi2s, blsp_spi3, blsp_uart3, ext_dbg, ldo_en, NA,
+		       NA, NA, NA),
+	[9] = PINGROUP(9, pri_mi2s, blsp_spi3, blsp_uart3, ext_dbg, NA, NA, NA,
+		       NA, NA),
+	[10] = PINGROUP(10, pri_mi2s, blsp_spi3, blsp_uart3, blsp_i2c3,
+			ext_dbg, NA, NA, NA, NA),
+	[11] = PINGROUP(11, pri_mi2s, blsp_spi3, blsp_uart3, blsp_i2c3,
+			ext_dbg, gcc_gp3, NA, NA, NA),
+	[12] = PINGROUP(12, pri_mi2s, NA, qdss_stm19, qdss4, NA, NA, NA, NA,
+			NA),
+	[13] = PINGROUP(13, pri_mi2s, NA, qdss_stm18, qdss5, NA, NA, NA, NA,
+			NA),
+	[14] = PINGROUP(14, pri_mi2s, NA, NA, qdss_stm17, qdss6, bimc_dte0,
+			native_tsens, NA, NA),
+	[15] = PINGROUP(15, pri_mi2s, NA, NA, qdss_stm16, qdss7, NA, NA,
+			bimc_dte1, NA),
+	[16] = PINGROUP(16, sec_mi2s, blsp_spi4, blsp_uart4, qdss_cti,
+			qdss_cti, NA, qdss_stm27, qdss8, NA),
+	[17] = PINGROUP(17, sec_mi2s, blsp_spi4, blsp_uart4, qdss_cti,
+			qdss_cti, qdss_stm26, qdss9, NA, NA),
+	[18] = PINGROUP(18, sec_mi2s, blsp_spi4, blsp_uart4, blsp_i2c4,
+			gcc_gp1, qdss_stm25, qdss10, NA, NA),
+	[19] = PINGROUP(19, sec_mi2s, blsp_spi4, blsp_uart4, blsp_i2c4,
+			jitter_bist, gcc_gp2, qdss_stm24, qdss11, NA),
+	[20] = PINGROUP(20, sec_mi2s, ebi2_a, blsp_uart1, blsp_uart4, NA, NA,
+			NA, NA, NA),
+	[21] = PINGROUP(21, sec_mi2s, ebi2_lcd, blsp_uart1, blsp_uart4, NA, NA,
+			NA, NA, NA),
+	[22] = PINGROUP(22, sec_mi2s, ebi2_lcd, blsp_uart1, qdss_cti, qdss_cti,
+			blsp_uart4, pll_bist, NA, NA),
+	[23] = PINGROUP(23, sec_mi2s, ebi2_lcd, qdss_cti, qdss_cti, blsp_uart1,
+			blsp_uart4, NA, NA, NA),
+	[24] = PINGROUP(24, adsp_ext, NA, qdss_stm11, NA, NA, NA, NA, NA, NA),
+	[25] = PINGROUP(25, m_voc, adsp_ext, NA, qdss_stm10, NA, NA, NA, NA,
+			NA),
+	[26] = PINGROUP(26, NA, NA, NA, native_char, NA, NA, NA, NA, NA),
+	[27] = PINGROUP(27, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[28] = PINGROUP(28, NA, native_char3, NA, NA, NA, NA, NA, NA, NA),
+	[29] = PINGROUP(29, NA, NA, nav_pps, nav_dr, NA, native_char2,
+			native_tsense, NA, NA),
+	[30] = PINGROUP(30, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[31] = PINGROUP(31, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[32] = PINGROUP(32, NA, native_char1, NA, NA, NA, NA, NA, NA, NA),
+	[33] = PINGROUP(33, NA, pa_indicator, qdss_traceclk, native_char0, NA,
+			NA, NA, NA, NA),
+	[34] = PINGROUP(34, qlink_en, NA, NA, NA, NA, NA, NA, NA, NA),
+	[35] = PINGROUP(35, qlink_req, pll_test, NA, NA, NA, NA, NA, NA, NA),
+	[36] = PINGROUP(36, NA, NA, cri_trng, NA, NA, NA, NA, NA, NA),
+	[37] = PINGROUP(37, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[38] = PINGROUP(38, NA, NA, prng_rosc, NA, NA, NA, NA, NA, NA),
+	[39] = PINGROUP(39, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[40] = PINGROUP(40, NA, NA, cri_trng0, NA, NA, NA, NA, NA, NA),
+	[41] = PINGROUP(41, NA, NA, cri_trng1, NA, NA, NA, NA, NA, NA),
+	[42] = PINGROUP(42, nav_pps, nav_dr, pll_ref, NA, NA, NA, NA, NA, NA),
+	[43] = PINGROUP(43, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[44] = PINGROUP(44, coex_uart, qdss_tracectl, NA, NA, NA, NA, NA, NA,
+			NA),
+	[45] = PINGROUP(45, coex_uart, NA, NA, NA, NA, NA, NA, NA, NA),
+	[46] = PINGROUP(46, m_voc, ddr_bist, NA, NA, NA, NA, NA, NA, NA),
+	[47] = PINGROUP(47, ddr_bist, NA, NA, NA, NA, NA, NA, NA, NA),
+	[48] = PINGROUP(48, ddr_bist, NA, NA, NA, NA, NA, NA, NA, NA),
+	[49] = PINGROUP(49, ddr_bist, NA, NA, NA, NA, NA, NA, NA, NA),
+	[50] = PINGROUP(50, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[51] = PINGROUP(51, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[52] = PINGROUP(52, blsp_spi2, blsp_spi1, blsp_spi3, blsp_spi4, NA, NA,
+			NA, NA, NA),
+	[53] = PINGROUP(53, pci_e, NA, NA, NA, NA, NA, NA, NA, NA),
+	[54] = PINGROUP(54, qdss_cti, qdss_cti, NA, NA, NA, NA, NA, NA, NA),
+	[55] = PINGROUP(55, qdss_cti, qdss_cti, tgu_ch0, NA, NA, NA, NA, NA,
+			NA),
+	[56] = PINGROUP(56, pcie_clkreq, NA, NA, NA, NA, NA, NA, NA, NA),
+	[57] = PINGROUP(57, NA, qdss_stm15, NA, NA, NA, NA, NA, NA, NA),
+	[58] = PINGROUP(58, NA, qdss_stm14, NA, NA, NA, NA, NA, NA, NA),
+	[59] = PINGROUP(59, qdss_cti, m_voc, NA, qdss_stm13, bimc_dte0, NA, NA,
+			NA, NA),
+	[60] = PINGROUP(60, mgpi_clk, NA, qdss_stm12, bimc_dte1, NA, NA, NA,
+			NA, NA),
+	[61] = PINGROUP(61, qdss_cti, NA, m_voc, NA, qdss_stm9, NA, NA, NA, NA),
+	[62] = PINGROUP(62, i2s_mclk, nav_pps, nav_dr, audio_ref, blsp_spi1,
+			blsp_spi2, blsp_spi3, blsp_spi4, ldo_update),
+	[63] = PINGROUP(63, blsp_uart2, NA, qdss_stm7, qdss12, NA, NA, NA, NA,
+			NA),
+	[64] = PINGROUP(64, blsp_uart2, qdss_stm6, qdss13, NA, NA, NA, NA, NA,
+			NA),
+	[65] = PINGROUP(65, blsp_uart2, blsp_i2c2, NA, qdss_stm5, qdss14, NA,
+			NA, NA, NA),
+	[66] = PINGROUP(66, blsp_uart2, blsp_i2c2, NA, qdss_stm4, qdss15, NA,
+			NA, NA, NA),
+	[67] = PINGROUP(67, uim1_data, NA, qdss_stm3, NA, NA, NA, NA, NA, NA),
+	[68] = PINGROUP(68, uim1_present, qdss_stm2, NA, NA, NA, NA, NA, NA,
+			NA),
+	[69] = PINGROUP(69, uim1_reset, qdss_stm1, NA, NA, NA, NA, NA, NA, NA),
+	[70] = PINGROUP(70, uim1_clk, NA, qdss_stm0, NA, NA, NA, NA, NA, NA),
+	[71] = PINGROUP(71, mgpi_clk, blsp_spi1, blsp_spi2, blsp_spi3,
+			blsp_spi4, dbg_out, NA, NA, NA),
+	[72] = PINGROUP(72, NA, blsp_spi1, NA, NA, NA, NA, NA, NA, NA),
+	[73] = PINGROUP(73, NA, blsp_spi1, NA, gcc_plltest, NA, NA, NA, NA, NA),
+	[74] = PINGROUP(74, NA, blsp_spi1, NA, blsp_i2c1, gcc_plltest, NA, NA,
+			NA, NA),
+	[75] = PINGROUP(75, NA, blsp_spi1, NA, blsp_i2c1, NA, NA, NA, NA, NA),
+	[76] = PINGROUP(76, blsp_i2c4, NA, NA, NA, NA, NA, NA, NA, NA),
+	[77] = PINGROUP(77, blsp_i2c4, NA, NA, NA, NA, NA, NA, NA, NA),
+	[78] = PINGROUP(78, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[79] = PINGROUP(79, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[80] = PINGROUP(80, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[81] = PINGROUP(81, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[82] = PINGROUP(82, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[83] = PINGROUP(83, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[84] = PINGROUP(84, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[85] = PINGROUP(85, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[86] = PINGROUP(86, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[87] = PINGROUP(87, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[88] = PINGROUP(88, qdss_cti, qdss_cti, NA, NA, NA, NA, NA, NA, NA),
+	[89] = PINGROUP(89, qdss_cti, qdss_cti, NA, NA, NA, NA, NA, NA, NA),
+	[90] = PINGROUP(90, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[91] = PINGROUP(91, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[92] = PINGROUP(92, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[93] = PINGROUP(93, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[94] = PINGROUP(94, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[95] = PINGROUP(95, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[96] = PINGROUP(96, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[97] = PINGROUP(97, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[98] = PINGROUP(98, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[99] = PINGROUP(99, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	[100] = SDC_QDSD_PINGROUP(sdc1_rclk, 0x9a000, 15, 0),
+	[101] = SDC_QDSD_PINGROUP(sdc1_clk, 0x9a000, 13, 6),
+	[102] = SDC_QDSD_PINGROUP(sdc1_cmd, 0x9a000, 11, 3),
+	[103] = SDC_QDSD_PINGROUP(sdc1_data, 0x9a000, 9, 0),
+	[104] = SDC_QDSD_PINGROUP(sdc2_clk, 0x0, 14, 6),
+	[105] = SDC_QDSD_PINGROUP(sdc2_cmd, 0x0, 11, 3),
+	[106] = SDC_QDSD_PINGROUP(sdc2_data, 0x0, 9, 0),
 };
 
 static const struct msm_pinctrl_soc_data sdxpoorwills_pinctrl = {
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa.c b/drivers/platform/msm/ipa/ipa_v2/ipa.c
index 70d74f0..6230356 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa.c
@@ -536,6 +536,7 @@ static int ipa_send_wan_msg(unsigned long usr_param, uint8_t msg_type, bool is_c
 	int retval;
 	struct ipa_wan_msg *wan_msg;
 	struct ipa_msg_meta msg_meta;
+	struct ipa_wan_msg cache_wan_msg;
 
 	wan_msg = kzalloc(sizeof(struct ipa_wan_msg), GFP_KERNEL);
 	if (!wan_msg) {
@@ -549,6 +550,8 @@ static int ipa_send_wan_msg(unsigned long usr_param, uint8_t msg_type, bool is_c
 		return -EFAULT;
 	}
 
+	memcpy(&cache_wan_msg, wan_msg, sizeof(cache_wan_msg));
+
 	memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
 	msg_meta.msg_type = msg_type;
 	msg_meta.msg_len = sizeof(struct ipa_wan_msg);
@@ -565,8 +568,8 @@ static int ipa_send_wan_msg(unsigned long usr_param, uint8_t msg_type, bool is_c
 		/* cache the cne event */
 		memcpy(&ipa_ctx->ipa_cne_evt_req_cache[
 			ipa_ctx->num_ipa_cne_evt_req].wan_msg,
-			wan_msg,
-			sizeof(struct ipa_wan_msg));
+			&cache_wan_msg,
+			sizeof(cache_wan_msg));
 
 		memcpy(&ipa_ctx->ipa_cne_evt_req_cache[
 			ipa_ctx->num_ipa_cne_evt_req].msg_meta,
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.c b/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.c
index 57e0a53..2608e1d 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.c
@@ -655,9 +655,8 @@ int qmi_filter_notify_send(struct ipa_fltr_installed_notif_req_msg_v01 *req)
 
 	/* check if the filter rules from IPACM is valid */
 	if (req->filter_index_list_len == 0) {
-		IPAWANERR(" delete UL filter rule for pipe %d\n",
+		IPAWANDBG(" delete UL filter rule for pipe %d\n",
 		req->source_pipe_index);
-		return -EINVAL;
 	} else if (req->filter_index_list_len > QMI_IPA_MAX_FILTERS_V01) {
 		IPAWANERR(" UL filter rule for pipe %d exceed max (%u)\n",
 		req->source_pipe_index,
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index 097eec8..e9df986 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -545,6 +545,7 @@ static int ipa3_send_wan_msg(unsigned long usr_param, uint8_t msg_type, bool is_
 	int retval;
 	struct ipa_wan_msg *wan_msg;
 	struct ipa_msg_meta msg_meta;
+	struct ipa_wan_msg cache_wan_msg;
 
 	wan_msg = kzalloc(sizeof(struct ipa_wan_msg), GFP_KERNEL);
 	if (!wan_msg) {
@@ -558,6 +559,8 @@ static int ipa3_send_wan_msg(unsigned long usr_param, uint8_t msg_type, bool is_
 		return -EFAULT;
 	}
 
+	memcpy(&cache_wan_msg, wan_msg, sizeof(cache_wan_msg));
+
 	memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
 	msg_meta.msg_type = msg_type;
 	msg_meta.msg_len = sizeof(struct ipa_wan_msg);
@@ -574,8 +577,8 @@ static int ipa3_send_wan_msg(unsigned long usr_param, uint8_t msg_type, bool is_
 		/* cache the cne event */
 		memcpy(&ipa3_ctx->ipa_cne_evt_req_cache[
 			ipa3_ctx->num_ipa_cne_evt_req].wan_msg,
-			wan_msg,
-			sizeof(struct ipa_wan_msg));
+			&cache_wan_msg,
+			sizeof(cache_wan_msg));
 
 		memcpy(&ipa3_ctx->ipa_cne_evt_req_cache[
 			ipa3_ctx->num_ipa_cne_evt_req].msg_meta,
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_pm.c b/drivers/platform/msm/ipa/ipa_v3/ipa_pm.c
index 11c1737..3bf0327 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_pm.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_pm.c
@@ -425,7 +425,13 @@ static void activate_work_func(struct work_struct *work)
 	activate_client(client->hdl);
 
 	mutex_lock(&ipa_pm_ctx->client_mutex);
-	client->callback(client->callback_params, IPA_PM_CLIENT_ACTIVATED);
+	if (client->callback) {
+		client->callback(client->callback_params,
+			IPA_PM_CLIENT_ACTIVATED);
+	} else {
+		IPA_PM_ERR("client has no callback");
+		WARN_ON(1);
+	}
 	mutex_unlock(&ipa_pm_ctx->client_mutex);
 
 	IPA_PM_DBG_STATE(client->hdl, client->name, client->state);
@@ -677,8 +683,7 @@ int ipa_pm_register(struct ipa_pm_register_params *params, u32 *hdl)
 {
 	struct ipa_pm_client *client;
 
-	if (params == NULL || hdl == NULL || params->name == NULL
-		|| params->callback == NULL) {
+	if (params == NULL || hdl == NULL || params->name == NULL) {
 		IPA_PM_ERR("Invalid Params\n");
 		return -EINVAL;
 	}
@@ -1115,9 +1120,14 @@ int ipa_pm_handle_suspend(u32 pipe_bitmask)
 		if (pipe_bitmask & (1 << i)) {
 			client = ipa_pm_ctx->clients_by_pipe[i];
 			if (client && client_notified[client->hdl] == false) {
-				client->callback(client->callback_params,
-					IPA_PM_REQUEST_WAKEUP);
-				client_notified[client->hdl] = true;
+				if (client->callback) {
+					client->callback(client->callback_params
+						, IPA_PM_REQUEST_WAKEUP);
+					client_notified[client->hdl] = true;
+				} else {
+					IPA_PM_ERR("client has no callback");
+					WARN_ON(1);
+				}
 			}
 		}
 	}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
index 4e8c233..fbaa4ae 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
@@ -798,9 +798,8 @@ int ipa3_qmi_filter_notify_send(
 
 	/* check if the filter rules from IPACM is valid */
 	if (req->rule_id_len == 0) {
-		IPAWANERR(" delete UL filter rule for pipe %d\n",
+		IPAWANDBG(" delete UL filter rule for pipe %d\n",
 		req->source_pipe_index);
-		return -EINVAL;
 	} else if (req->rule_id_len > QMI_IPA_MAX_FILTERS_V01) {
 		IPAWANERR(" UL filter rule for pipe %d exceed max (%u)\n",
 		req->source_pipe_index,
diff --git a/drivers/platform/msm/usb_bam.c b/drivers/platform/msm/usb_bam.c
index 92321ad..8495787 100644
--- a/drivers/platform/msm/usb_bam.c
+++ b/drivers/platform/msm/usb_bam.c
@@ -102,7 +102,6 @@ struct usb_bam_sps_type {
 struct usb_bam_ctx_type {
 	struct usb_bam_sps_type		usb_bam_sps;
 	struct resource			*io_res;
-	void __iomem			*regs;
 	int				irq;
 	struct platform_device		*usb_bam_pdev;
 	struct workqueue_struct		*usb_bam_wq;
@@ -112,6 +111,7 @@ struct usb_bam_ctx_type {
 	u32				inactivity_timer_ms;
 	bool				is_bam_inactivity;
 	struct usb_bam_pipe_connect	*usb_bam_connections;
+	struct msm_usb_bam_data *usb_bam_data;
 	spinlock_t		usb_bam_lock;
 };
 
@@ -125,13 +125,13 @@ static char *bam_enable_strings[MAX_BAMS] = {
  * CI_CTRL & DWC3_CTRL shouldn't be used simultaneously
  * since both share the same prod & cons rm resourses
  */
-static enum ipa_client_type ipa_rm_resource_prod[MAX_BAMS] = {
+static enum ipa_rm_resource_name ipa_rm_resource_prod[MAX_BAMS] = {
 	[CI_CTRL] = IPA_RM_RESOURCE_USB_PROD,
 	[HSIC_CTRL]  = IPA_RM_RESOURCE_HSIC_PROD,
 	[DWC3_CTRL] = IPA_RM_RESOURCE_USB_PROD,
 };
 
-static enum ipa_client_type ipa_rm_resource_cons[MAX_BAMS] = {
+static enum ipa_rm_resource_name ipa_rm_resource_cons[MAX_BAMS] = {
 	[CI_CTRL] = IPA_RM_RESOURCE_USB_CONS,
 	[HSIC_CTRL]  = IPA_RM_RESOURCE_HSIC_CONS,
 	[DWC3_CTRL] = IPA_RM_RESOURCE_USB_CONS,
@@ -319,8 +319,6 @@ static int usb_bam_alloc_buffer(struct usb_bam_pipe_connect *pipe_connect)
 {
 	int ret = 0;
 	struct usb_bam_ctx_type *ctx = &msm_usb_bam[pipe_connect->bam_type];
-	struct msm_usb_bam_platform_data *pdata =
-				ctx->usb_bam_pdev->dev.platform_data;
 	struct sps_mem_buffer *data_buf = &(pipe_connect->data_mem_buf);
 	struct sps_mem_buffer *desc_buf = &(pipe_connect->desc_mem_buf);
 
@@ -359,7 +357,7 @@ static int usb_bam_alloc_buffer(struct usb_bam_pipe_connect *pipe_connect)
 		}
 
 		data_buf->phys_base = pipe_connect->data_fifo_base_offset +
-						pdata->usb_bam_fifo_baseaddr;
+				ctx->usb_bam_data->usb_bam_fifo_baseaddr;
 		data_buf->size = pipe_connect->data_fifo_size;
 		data_buf->base = ioremap(data_buf->phys_base, data_buf->size);
 		if (!data_buf->base) {
@@ -371,7 +369,7 @@ static int usb_bam_alloc_buffer(struct usb_bam_pipe_connect *pipe_connect)
 		memset_io(data_buf->base, 0, data_buf->size);
 
 		desc_buf->phys_base = pipe_connect->desc_fifo_base_offset +
-						pdata->usb_bam_fifo_baseaddr;
+				ctx->usb_bam_data->usb_bam_fifo_baseaddr;
 		desc_buf->size = pipe_connect->desc_fifo_size;
 		desc_buf->base = ioremap(desc_buf->phys_base, desc_buf->size);
 		if (!desc_buf->base) {
@@ -1068,7 +1066,6 @@ int usb_bam_connect(enum usb_ctrl cur_bam, int idx, u32 *bam_pipe_idx)
 	struct usb_bam_pipe_connect *pipe_connect =
 				&ctx->usb_bam_connections[idx];
 	struct device *bam_dev = &ctx->usb_bam_pdev->dev;
-	struct msm_usb_bam_platform_data *pdata = bam_dev->platform_data;
 	enum usb_bam_mode cur_mode;
 
 	if (pipe_connect->enabled) {
@@ -1094,7 +1091,7 @@ int usb_bam_connect(enum usb_ctrl cur_bam, int idx, u32 *bam_pipe_idx)
 
 	spin_lock(&ctx->usb_bam_lock);
 	/* Check if BAM requires RESET before connect and reset of first pipe */
-	if ((pdata->reset_on_connect == true) &&
+	if ((ctx->usb_bam_data->reset_on_connect == true) &&
 			    (ctx->pipes_enabled_per_bam == 0)) {
 		spin_unlock(&ctx->usb_bam_lock);
 
@@ -1595,8 +1592,6 @@ static int ss_usb_cons_release_resource(void)
 static void usb_bam_ipa_create_resources(enum usb_ctrl cur_bam)
 {
 	struct usb_bam_ctx_type *ctx = &msm_usb_bam[cur_bam];
-	struct msm_usb_bam_platform_data *pdata =
-					ctx->usb_bam_pdev->dev.platform_data;
 	struct ipa_rm_create_params usb_prod_create_params;
 	struct ipa_rm_create_params usb_cons_create_params;
 	int ret;
@@ -1605,7 +1600,8 @@ static void usb_bam_ipa_create_resources(enum usb_ctrl cur_bam)
 	memset(&usb_prod_create_params, 0, sizeof(usb_prod_create_params));
 	usb_prod_create_params.name = ipa_rm_resource_prod[cur_bam];
 	usb_prod_create_params.reg_params.notify_cb = usb_prod_notify_cb;
-	usb_prod_create_params.reg_params.user_data = &pdata->bam_type;
+	usb_prod_create_params.reg_params.user_data
+					 = &ctx->usb_bam_data->bam_type;
 	usb_prod_create_params.floor_voltage = IPA_VOLTAGE_SVS;
 	ret = ipa_rm_create_resource(&usb_prod_create_params);
 	if (ret) {
@@ -1628,6 +1624,22 @@ static void usb_bam_ipa_create_resources(enum usb_ctrl cur_bam)
 	}
 }
 
+static void usb_bam_ipa_delete_resources(enum usb_ctrl cur_bam)
+{
+	int ret;
+
+	ret = ipa_rm_delete_resource(ipa_rm_resource_prod[cur_bam]);
+	if (ret)
+		log_event_err("%s: Failed to delete USB_PROD resource\n",
+							__func__);
+
+	ret = ipa_rm_delete_resource(ipa_rm_resource_cons[cur_bam]);
+	if (ret)
+		log_event_err("%s: Failed to delete USB_CONS resource\n",
+							__func__);
+
+}
+
 static void wait_for_prod_granted(enum usb_ctrl cur_bam)
 {
 	int ret;
@@ -2134,16 +2146,14 @@ static int usb_bam_set_ipa_perf(enum usb_ctrl cur_bam,
 	int ret;
 	struct usb_bam_ctx_type *ctx = &msm_usb_bam[cur_bam];
 	struct ipa_rm_perf_profile ipa_rm_perf_prof;
-	struct msm_usb_bam_platform_data *pdata =
-					ctx->usb_bam_pdev->dev.platform_data;
 
 	if (usb_connection_speed == USB_SPEED_SUPER)
 		ipa_rm_perf_prof.max_supported_bandwidth_mbps =
-			pdata->max_mbps_superspeed;
+			ctx->usb_bam_data->max_mbps_superspeed;
 	else
 		/* Bam2Bam is supported only for SS and HS (HW limitation) */
 		ipa_rm_perf_prof.max_supported_bandwidth_mbps =
-			pdata->max_mbps_highspeed;
+			ctx->usb_bam_data->max_mbps_highspeed;
 
 	/*
 	 * Having a max mbps property in dtsi file is a must
@@ -2180,7 +2190,6 @@ int usb_bam_connect_ipa(enum usb_ctrl cur_bam,
 	struct usb_bam_ctx_type *ctx = &msm_usb_bam[cur_bam];
 	struct usb_bam_pipe_connect *pipe_connect;
 	struct device *bam_dev = &ctx->usb_bam_pdev->dev;
-	struct msm_usb_bam_platform_data *pdata = bam_dev->platform_data;
 	int ret;
 	bool bam2bam, is_dpl;
 
@@ -2256,7 +2265,8 @@ int usb_bam_connect_ipa(enum usb_ctrl cur_bam,
 
 	/* Check if BAM requires RESET before connect and reset first pipe */
 	spin_lock(&ctx->usb_bam_lock);
-	if (pdata->reset_on_connect && !ctx->pipes_enabled_per_bam) {
+	if (ctx->usb_bam_data->reset_on_connect &&
+		!ctx->pipes_enabled_per_bam) {
 		spin_unlock(&ctx->usb_bam_lock);
 		if (cur_bam == CI_CTRL)
 			msm_hw_bam_disable(1);
@@ -2606,8 +2616,6 @@ int usb_bam_disconnect_pipe(enum usb_ctrl bam_type, u8 idx)
 	struct usb_bam_pipe_connect *pipe_connect;
 	struct device *bam_dev = &ctx->usb_bam_pdev->dev;
 	int ret;
-	struct msm_usb_bam_platform_data *pdata =
-				ctx->usb_bam_pdev->dev.platform_data;
 
 	pipe_connect = &ctx->usb_bam_connections[idx];
 
@@ -2635,7 +2643,8 @@ int usb_bam_disconnect_pipe(enum usb_ctrl bam_type, u8 idx)
 
 	log_event_dbg("%s: success disconnecting pipe %d\n", __func__, idx);
 
-	if (pdata->reset_on_disconnect && !ctx->pipes_enabled_per_bam) {
+	if (ctx->usb_bam_data->reset_on_disconnect
+				&& !ctx->pipes_enabled_per_bam) {
 		if (bam_type == CI_CTRL)
 			msm_hw_bam_disable(1);
 
@@ -2803,10 +2812,10 @@ static void usb_bam_sps_events(enum sps_callback_case sps_cb_case, void *user)
 	}
 }
 
-static struct msm_usb_bam_platform_data *usb_bam_dt_to_pdata(
+static struct msm_usb_bam_data *usb_bam_dt_to_data(
 	struct platform_device *pdev, u32 usb_addr)
 {
-	struct msm_usb_bam_platform_data *pdata;
+	struct msm_usb_bam_data *usb_bam_data;
 	struct device_node *node = pdev->dev.of_node;
 	int rc = 0;
 	u8 i = 0;
@@ -2815,8 +2824,9 @@ static struct msm_usb_bam_platform_data *usb_bam_dt_to_pdata(
 	u32 threshold, max_connections = 0;
 	static struct usb_bam_pipe_connect *usb_bam_connections;
 
-	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
-	if (!pdata)
+	usb_bam_data = devm_kzalloc(&pdev->dev, sizeof(*usb_bam_data),
+								GFP_KERNEL);
+	if (!usb_bam_data)
 		return NULL;
 
 	rc = of_property_read_u32(node, "qcom,bam-type", &bam);
@@ -2830,7 +2840,7 @@ static struct msm_usb_bam_platform_data *usb_bam_dt_to_pdata(
 			__func__, bam);
 		return NULL;
 	}
-	pdata->bam_type = bam;
+	usb_bam_data->bam_type = bam;
 
 	rc = of_property_read_u32(node, "qcom,bam-mode", &bam_mode);
 	if (rc) {
@@ -2840,49 +2850,49 @@ static struct msm_usb_bam_platform_data *usb_bam_dt_to_pdata(
 		bam_mode = USB_BAM_DEVICE;
 	}
 
-	pdata->reset_on_connect = of_property_read_bool(node,
+	usb_bam_data->reset_on_connect = of_property_read_bool(node,
 					"qcom,reset-bam-on-connect");
 
-	pdata->reset_on_disconnect = of_property_read_bool(node,
+	usb_bam_data->reset_on_disconnect = of_property_read_bool(node,
 					"qcom,reset-bam-on-disconnect");
 
 	rc = of_property_read_u32(node, "qcom,usb-bam-num-pipes",
-		&pdata->usb_bam_num_pipes);
+		&usb_bam_data->usb_bam_num_pipes);
 	if (rc) {
 		log_event_err("Invalid usb bam num pipes property\n");
 		return NULL;
 	}
 
 	rc = of_property_read_u32(node, "qcom,usb-bam-max-mbps-highspeed",
-		&pdata->max_mbps_highspeed);
+		&usb_bam_data->max_mbps_highspeed);
 	if (rc)
-		pdata->max_mbps_highspeed = 0;
+		usb_bam_data->max_mbps_highspeed = 0;
 
 	rc = of_property_read_u32(node, "qcom,usb-bam-max-mbps-superspeed",
-		&pdata->max_mbps_superspeed);
+		&usb_bam_data->max_mbps_superspeed);
 	if (rc)
-		pdata->max_mbps_superspeed = 0;
+		usb_bam_data->max_mbps_superspeed = 0;
 
 	rc = of_property_read_u32(node, "qcom,usb-bam-fifo-baseaddr", &addr);
 	if (rc)
 		pr_debug("%s: Invalid usb base address property\n", __func__);
 	else
-		pdata->usb_bam_fifo_baseaddr = addr;
+		usb_bam_data->usb_bam_fifo_baseaddr = addr;
 
-	pdata->ignore_core_reset_ack = of_property_read_bool(node,
+	usb_bam_data->ignore_core_reset_ack = of_property_read_bool(node,
 		"qcom,ignore-core-reset-ack");
 
-	pdata->disable_clk_gating = of_property_read_bool(node,
+	usb_bam_data->disable_clk_gating = of_property_read_bool(node,
 		"qcom,disable-clk-gating");
 
 	rc = of_property_read_u32(node, "qcom,usb-bam-override-threshold",
 			&threshold);
 	if (rc)
-		pdata->override_threshold = USB_THRESHOLD;
+		usb_bam_data->override_threshold = USB_THRESHOLD;
 	else
-		pdata->override_threshold = threshold;
+		usb_bam_data->override_threshold = threshold;
 
-	pdata->enable_hsusb_bam_on_boot = of_property_read_bool(node,
+	usb_bam_data->enable_hsusb_bam_on_boot = of_property_read_bool(node,
 		"qcom,enable-hsusb-bam-on-boot");
 
 	for_each_child_of_node(pdev->dev.of_node, node)
@@ -2918,7 +2928,7 @@ static struct msm_usb_bam_platform_data *usb_bam_dt_to_pdata(
 			goto err;
 
 		if (usb_bam_connections[i].mem_type == OCI_MEM) {
-			if (!pdata->usb_bam_fifo_baseaddr) {
+			if (!usb_bam_data->usb_bam_fifo_baseaddr) {
 				log_event_err("%s: base address is missing\n",
 					__func__);
 				goto err;
@@ -3002,7 +3012,7 @@ static struct msm_usb_bam_platform_data *usb_bam_dt_to_pdata(
 	msm_usb_bam[bam].usb_bam_connections = usb_bam_connections;
 	msm_usb_bam[bam].max_connections = max_connections;
 
-	return pdata;
+	return usb_bam_data;
 err:
 	log_event_err("%s: failed\n", __func__);
 	return NULL;
@@ -3011,9 +3021,8 @@ static struct msm_usb_bam_platform_data *usb_bam_dt_to_pdata(
 static int usb_bam_init(struct platform_device *pdev)
 {
 	int ret;
-	struct msm_usb_bam_platform_data *pdata = pdev->dev.platform_data;
-	enum usb_ctrl bam_type = pdata->bam_type;
-	struct usb_bam_ctx_type *ctx = &msm_usb_bam[bam_type];
+	struct usb_bam_ctx_type *ctx = dev_get_drvdata(&pdev->dev);
+	enum usb_ctrl bam_type = ctx->usb_bam_data->bam_type;
 	struct sps_bam_props props;
 
 	memset(&props, 0, sizeof(props));
@@ -3022,23 +3031,22 @@ static int usb_bam_init(struct platform_device *pdev)
 		bam_enable_strings[bam_type]);
 
 	props.phys_addr = ctx->io_res->start;
-	props.virt_addr = ctx->regs;
 	props.virt_size = resource_size(ctx->io_res);
 	props.irq = ctx->irq;
-	props.summing_threshold = pdata->override_threshold;
-	props.event_threshold = pdata->override_threshold;
-	props.num_pipes = pdata->usb_bam_num_pipes;
+	props.summing_threshold = ctx->usb_bam_data->override_threshold;
+	props.event_threshold = ctx->usb_bam_data->override_threshold;
+	props.num_pipes = ctx->usb_bam_data->usb_bam_num_pipes;
 	props.callback = usb_bam_sps_events;
 	props.user = bam_enable_strings[bam_type];
 
 	/*
-	 * HSUSB and HSIC Cores don't support RESET ACK signal to BAMs.
-	 * Hence let BAM to ignore acknowledge from USB while resetting PIPE.
-	 */
-	if (pdata->ignore_core_reset_ack && bam_type != DWC3_CTRL)
+	 * HSUSB and HSIC Cores don't support RESET ACK signal to BAMs.
+	 * Hence, let BAM ignore the acknowledge from USB while resetting PIPE.
+	 */
+	if (ctx->usb_bam_data->ignore_core_reset_ack && bam_type != DWC3_CTRL)
 		props.options = SPS_BAM_NO_EXT_P_RST;
 
-	if (pdata->disable_clk_gating)
+	if (ctx->usb_bam_data->disable_clk_gating)
 		props.options |= SPS_BAM_NO_LOCAL_CLK_GATING;
 
 	/*
@@ -3046,7 +3054,8 @@ static int usb_bam_init(struct platform_device *pdev)
 	 * starting peripheral controller to avoid switching USB core mode
 	 * from legacy to BAM with ongoing data transfers.
 	 */
-	if (pdata->enable_hsusb_bam_on_boot && bam_type == CI_CTRL) {
+	if (ctx->usb_bam_data->enable_hsusb_bam_on_boot
+					&& bam_type == CI_CTRL) {
 		pr_debug("Register and enable HSUSB BAM\n");
 		props.options |= SPS_BAM_OPT_ENABLE_AT_BOOT;
 	}
@@ -3063,9 +3072,8 @@ static int usb_bam_init(struct platform_device *pdev)
 static int enable_usb_bam(struct platform_device *pdev)
 {
 	int ret;
-	struct msm_usb_bam_platform_data *pdata = pdev->dev.platform_data;
-	enum usb_ctrl bam_type = pdata->bam_type;
-	struct usb_bam_ctx_type *ctx = &msm_usb_bam[bam_type];
+	struct usb_bam_ctx_type *ctx = dev_get_drvdata(&pdev->dev);
+	enum usb_ctrl bam_type = ctx->usb_bam_data->bam_type;
 
 	ret = usb_bam_init(pdev);
 	if (ret) {
@@ -3132,14 +3140,19 @@ void usb_bam_register_panic_hdlr(void)
 			&usb_bam_panic_blk);
 }
 
+static void usb_bam_unregister_panic_hdlr(void)
+{
+	atomic_notifier_chain_unregister(&panic_notifier_list,
+			&usb_bam_panic_blk);
+}
+
 static int usb_bam_probe(struct platform_device *pdev)
 {
 	int ret, i, irq;
 	struct resource *io_res;
-	void __iomem *regs;
 	enum usb_ctrl bam_type;
 	struct usb_bam_ctx_type *ctx;
-	struct msm_usb_bam_platform_data *pdata;
+	struct msm_usb_bam_data *usb_bam_data;
 
 	dev_dbg(&pdev->dev, "usb_bam_probe\n");
 
@@ -3155,25 +3168,19 @@ static int usb_bam_probe(struct platform_device *pdev)
 		return irq;
 	}
 
-	regs = devm_ioremap(&pdev->dev, io_res->start, resource_size(io_res));
-	if (!regs) {
-		log_event_err("%s: ioremap failed\n", __func__);
-		return -ENOMEM;
-	}
-
 	/* specify BAM physical address to be filled in BAM connections */
-	pdata = usb_bam_dt_to_pdata(pdev, io_res->start);
-	if (!pdata)
+	usb_bam_data = usb_bam_dt_to_data(pdev, io_res->start);
+	if (!usb_bam_data)
 		return -EINVAL;
 
-	pdev->dev.platform_data = pdata;
-	bam_type = pdata->bam_type;
+	bam_type = usb_bam_data->bam_type;
 	ctx = &msm_usb_bam[bam_type];
+	dev_set_drvdata(&pdev->dev, ctx);
 
 	ctx->usb_bam_pdev = pdev;
 	ctx->irq = irq;
-	ctx->regs = regs;
 	ctx->io_res = io_res;
+	ctx->usb_bam_data = usb_bam_data;
 
 	for (i = 0; i < ctx->max_connections; i++) {
 		ctx->usb_bam_connections[i].enabled = 0;
@@ -3314,15 +3321,15 @@ EXPORT_SYMBOL(usb_bam_get_bam_type);
 
 bool msm_usb_bam_enable(enum usb_ctrl bam, bool bam_enable)
 {
-	struct msm_usb_bam_platform_data *pdata;
+	struct msm_usb_bam_data *usb_bam_data;
 	struct usb_bam_ctx_type *ctx = &msm_usb_bam[bam];
 
 	if (!ctx->usb_bam_pdev)
 		return 0;
 
-	pdata = ctx->usb_bam_pdev->dev.platform_data;
+	usb_bam_data = ctx->usb_bam_data;
 	if ((bam != CI_CTRL) || !(bam_enable ||
-					pdata->enable_hsusb_bam_on_boot))
+					usb_bam_data->enable_hsusb_bam_on_boot))
 		return 0;
 
 	msm_hw_bam_disable(1);
@@ -3404,10 +3411,11 @@ EXPORT_SYMBOL(msm_bam_hsic_lpm_ok);
 
 static int usb_bam_remove(struct platform_device *pdev)
 {
-	struct msm_usb_bam_platform_data *pdata = pdev->dev.platform_data;
-	enum usb_ctrl bam_type = pdata->bam_type;
-	struct usb_bam_ctx_type *ctx = &msm_usb_bam[bam_type];
+	struct usb_bam_ctx_type *ctx = dev_get_drvdata(&pdev->dev);
 
+	usb_bam_ipa_delete_resources(ctx->usb_bam_data->bam_type);
+	usb_bam_unregister_panic_hdlr();
+	sps_deregister_bam_device(ctx->h_bam);
 	destroy_workqueue(ctx->usb_bam_wq);
 
 	return 0;
diff --git a/drivers/power/reset/msm-poweroff.c b/drivers/power/reset/msm-poweroff.c
index 9f04957..8d023a8 100644
--- a/drivers/power/reset/msm-poweroff.c
+++ b/drivers/power/reset/msm-poweroff.c
@@ -37,6 +37,7 @@
 #define EMERGENCY_DLOAD_MAGIC1    0x322A4F99
 #define EMERGENCY_DLOAD_MAGIC2    0xC67E4350
 #define EMERGENCY_DLOAD_MAGIC3    0x77777777
+#define EMMC_DLOAD_TYPE		0x2
 
 #define SCM_IO_DISABLE_PMIC_ARBITER	1
 #define SCM_IO_DEASSERT_PS_HOLD		2
@@ -47,13 +48,14 @@
 
 
 static int restart_mode;
-void *restart_reason;
+static void __iomem *restart_reason, *dload_type_addr;
 static bool scm_pmic_arbiter_disable_supported;
 static bool scm_deassert_ps_hold_supported;
 /* Download mode master kill-switch */
 static void __iomem *msm_ps_hold;
 static phys_addr_t tcsr_boot_misc_detect;
 static int download_mode = 1;
+static struct kobject dload_kobj;
 
 #ifdef CONFIG_QCOM_DLOAD_MODE
 #define EDL_MODE_PROP "qcom,msm-imem-emergency_download_mode"
@@ -72,8 +74,23 @@ static void *kaslr_imem_addr;
 static bool scm_dload_supported;
 
 static int dload_set(const char *val, struct kernel_param *kp);
+/* interface for exporting attributes */
+struct reset_attribute {
+	struct attribute        attr;
+	ssize_t (*show)(struct kobject *kobj, struct attribute *attr,
+			char *buf);
+	size_t (*store)(struct kobject *kobj, struct attribute *attr,
+			const char *buf, size_t count);
+};
+#define to_reset_attr(_attr) \
+	container_of(_attr, struct reset_attribute, attr)
+#define RESET_ATTR(_name, _mode, _show, _store)	\
+	static struct reset_attribute reset_attr_##_name = \
+			__ATTR(_name, _mode, _show, _store)
+
 module_param_call(download_mode, dload_set, param_get_int,
 			&download_mode, 0644);
+
 static int panic_prep_restart(struct notifier_block *this,
 			      unsigned long event, void *ptr)
 {
@@ -85,7 +102,7 @@ static struct notifier_block panic_blk = {
 	.notifier_call	= panic_prep_restart,
 };
 
-int scm_set_dload_mode(int arg1, int arg2)
+static int scm_set_dload_mode(int arg1, int arg2)
 {
 	struct scm_desc desc = {
 		.args[0] = arg1,
@@ -397,6 +414,84 @@ static void do_msm_poweroff(void)
 	pr_err("Powering off has failed\n");
 }
 
+static ssize_t attr_show(struct kobject *kobj, struct attribute *attr,
+				char *buf)
+{
+	struct reset_attribute *reset_attr = to_reset_attr(attr);
+	ssize_t ret = -EIO;
+
+	if (reset_attr->show)
+		ret = reset_attr->show(kobj, attr, buf);
+
+	return ret;
+}
+
+static ssize_t attr_store(struct kobject *kobj, struct attribute *attr,
+				const char *buf, size_t count)
+{
+	struct reset_attribute *reset_attr = to_reset_attr(attr);
+	ssize_t ret = -EIO;
+
+	if (reset_attr->store)
+		ret = reset_attr->store(kobj, attr, buf, count);
+
+	return ret;
+}
+
+static const struct sysfs_ops reset_sysfs_ops = {
+	.show	= attr_show,
+	.store	= attr_store,
+};
+
+static struct kobj_type reset_ktype = {
+	.sysfs_ops	= &reset_sysfs_ops,
+};
+
+static ssize_t show_emmc_dload(struct kobject *kobj, struct attribute *attr,
+				char *buf)
+{
+	uint32_t read_val, show_val;
+
+	read_val = __raw_readl(dload_type_addr);
+	if (read_val == EMMC_DLOAD_TYPE)
+		show_val = 1;
+	else
+		show_val = 0;
+
+	return snprintf(buf, sizeof(show_val), "%u\n", show_val);
+}
+
+static size_t store_emmc_dload(struct kobject *kobj, struct attribute *attr,
+				const char *buf, size_t count)
+{
+	uint32_t enabled;
+	int ret;
+
+	ret = kstrtouint(buf, 0, &enabled);
+	if (ret < 0)
+		return ret;
+
+	if (!((enabled == 0) || (enabled == 1)))
+		return -EINVAL;
+
+	if (enabled == 1)
+		__raw_writel(EMMC_DLOAD_TYPE, dload_type_addr);
+	else
+		__raw_writel(0, dload_type_addr);
+
+	return count;
+}
+RESET_ATTR(emmc_dload, 0644, show_emmc_dload, store_emmc_dload);
+
+static struct attribute *reset_attrs[] = {
+	&reset_attr_emmc_dload.attr,
+	NULL
+};
+
+static struct attribute_group reset_attr_group = {
+	.attrs = reset_attrs,
+};
+
 static int msm_restart_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
@@ -448,6 +543,33 @@ static int msm_restart_probe(struct platform_device *pdev)
 		iounmap(kaslr_imem_addr);
 	}
 #endif
+	np = of_find_compatible_node(NULL, NULL,
+				"qcom,msm-imem-dload-type");
+	if (!np) {
+		pr_err("unable to find DT imem dload-type node\n");
+		goto skip_sysfs_create;
+	} else {
+		dload_type_addr = of_iomap(np, 0);
+		if (!dload_type_addr) {
+			pr_err("unable to map imem dload-type offset\n");
+			goto skip_sysfs_create;
+		}
+	}
+
+	ret = kobject_init_and_add(&dload_kobj, &reset_ktype,
+			kernel_kobj, "%s", "dload");
+	if (ret) {
+		pr_err("%s:Error in creation kobject_add\n", __func__);
+		kobject_put(&dload_kobj);
+		goto skip_sysfs_create;
+	}
+
+	ret = sysfs_create_group(&dload_kobj, &reset_attr_group);
+	if (ret) {
+		pr_err("%s:Error in creation sysfs_create_group\n", __func__);
+		kobject_del(&dload_kobj);
+	}
+skip_sysfs_create:
 #endif
 	np = of_find_compatible_node(NULL, NULL,
 				"qcom,msm-imem-restart_reason");
diff --git a/drivers/power/supply/qcom/fg-core.h b/drivers/power/supply/qcom/fg-core.h
index 4303960..3ca5def 100644
--- a/drivers/power/supply/qcom/fg-core.h
+++ b/drivers/power/supply/qcom/fg-core.h
@@ -507,6 +507,7 @@ extern int fg_direct_mem_write(struct fg_chip *chip, u16 address,
 extern int fg_read(struct fg_chip *chip, int addr, u8 *val, int len);
 extern int fg_write(struct fg_chip *chip, int addr, u8 *val, int len);
 extern int fg_masked_write(struct fg_chip *chip, int addr, u8 mask, u8 val);
+extern int fg_dump_regs(struct fg_chip *chip);
 extern int fg_ima_init(struct fg_chip *chip);
 extern int fg_dma_init(struct fg_chip *chip);
 extern int fg_clear_ima_errors_if_any(struct fg_chip *chip, bool check_hw_sts);
diff --git a/drivers/power/supply/qcom/fg-memif.c b/drivers/power/supply/qcom/fg-memif.c
index 0abc9df..c1b5adc 100644
--- a/drivers/power/supply/qcom/fg-memif.c
+++ b/drivers/power/supply/qcom/fg-memif.c
@@ -792,6 +792,7 @@ static int fg_direct_mem_request(struct fg_chip *chip, bool request)
 		} else {
 			pr_err("wait for mem_grant timed out ret=%d\n",
 				ret);
+			fg_dump_regs(chip);
 		}
 	}
 
diff --git a/drivers/power/supply/qcom/fg-util.c b/drivers/power/supply/qcom/fg-util.c
index aed2062..fc60b26 100644
--- a/drivers/power/supply/qcom/fg-util.c
+++ b/drivers/power/supply/qcom/fg-util.c
@@ -492,6 +492,33 @@ int fg_masked_write(struct fg_chip *chip, int addr, u8 mask, u8 val)
 	return rc;
 }
 
+int fg_dump_regs(struct fg_chip *chip)
+{
+	int i, rc;
+	u8 buf[256];
+
+	if (!chip)
+		return -EINVAL;
+
+	rc = fg_read(chip, chip->batt_soc_base, buf, sizeof(buf));
+	if (rc < 0)
+		return rc;
+
+	pr_info("batt_soc_base registers:\n");
+	for (i = 0; i < sizeof(buf); i++)
+		pr_info("%04x:%02x\n", chip->batt_soc_base + i, buf[i]);
+
+	rc = fg_read(chip, chip->mem_if_base, buf, sizeof(buf));
+	if (rc < 0)
+		return rc;
+
+	pr_info("mem_if_base registers:\n");
+	for (i = 0; i < sizeof(buf); i++)
+		pr_info("%04x:%02x\n", chip->mem_if_base + i, buf[i]);
+
+	return 0;
+}
+
 int64_t twos_compliment_extend(int64_t val, int sign_bit_pos)
 {
 	int i, nbytes = DIV_ROUND_UP(sign_bit_pos, 8);
diff --git a/drivers/power/supply/qcom/qpnp-fg-gen3.c b/drivers/power/supply/qcom/qpnp-fg-gen3.c
index b02c860..fc34a8c9 100644
--- a/drivers/power/supply/qcom/qpnp-fg-gen3.c
+++ b/drivers/power/supply/qcom/qpnp-fg-gen3.c
@@ -3730,6 +3730,7 @@ static int fg_notifier_cb(struct notifier_block *nb,
 		return NOTIFY_OK;
 
 	if ((strcmp(psy->desc->name, "battery") == 0)
+		|| (strcmp(psy->desc->name, "parallel") == 0)
 		|| (strcmp(psy->desc->name, "usb") == 0)) {
 		/*
 		 * We cannot vote for awake votable here as that takes
@@ -4116,8 +4117,7 @@ static irqreturn_t fg_dma_grant_irq_handler(int irq, void *data)
 	}
 
 	fg_dbg(chip, FG_IRQ, "irq %d triggered, status:%d\n", irq, status);
-	if (status & MEM_GNT_BIT)
-		complete_all(&chip->mem_grant);
+	complete_all(&chip->mem_grant);
 
 	return IRQ_HANDLED;
 }
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index a8fb8b6..c94f915 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -71,6 +71,16 @@
 	  deadlock detection mode AMON will trigger an interrupt if some LLCC request
 	  ages out.
 
+config QCOM_LLCC_PERFMON
+	tristate "Qualcomm Technologies, Inc. LLCC Perfmon driver"
+	depends on QCOM_LLCC
+	help
+	  This option enables the driver for the LLCC Performance Monitor
+	  block. Using it, various events in the different LLCC sub-block
+	  ports can be monitored. It is used for performance and debug
+	  activity and exports a SYSFS interface to configure and dump the
+	  LLCC performance events.
+
 config QCOM_PM
 	bool "Qualcomm Power Management"
 	depends on ARCH_QCOM && !ARM64
@@ -375,16 +385,6 @@
 	  32-bit values by specifying a unique string and
 	  remote processor ID.
 
-config MSM_SMP2P_TEST
-	bool "SMSM Point-to-Point Test"
-	depends on MSM_SMP2P
-	help
-	  Enables loopback and unit testing support for
-	  SMP2P. Loopback support is used by other
-	  processors to do unit testing. Unit tests
-	  are used to verify the local and remote
-	  implementations.
-
 config MSM_IPC_ROUTER_SMD_XPRT
 	depends on MSM_SMD
 	depends on IPC_ROUTER
@@ -537,16 +537,6 @@
 	  online at any given point in time. This module can also restrict
 	  max freq or min freq of cpu cluster
 
-config MSM_PERFORMANCE_HOTPLUG_ON
-	bool "Hotplug functionality through msm_performance turned on"
-	depends on MSM_PERFORMANCE
-	default y
-	help
-	  If some other core-control driver is present turn off the core-control
-	  capability of msm_performance driver. Setting this flag to false will
-	  compile out the nodes needed for core-control functionality through
-	  msm_performance.
-
 config MSM_CDSP_LOADER
 	tristate "CDSP loader support"
 	depends on MSM_GLINK
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index 37b33e6..4966c04 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -6,6 +6,7 @@
 obj-$(CONFIG_QCOM_LLCC) += llcc-core.o llcc-slice.o
 obj-$(CONFIG_QCOM_SDM845_LLCC) += llcc-sdm845.o
 obj-$(CONFIG_QCOM_SDM670_LLCC) += llcc-sdm670.o
+obj-$(CONFIG_QCOM_LLCC_PERFMON) += llcc_perfmon.o
 obj-$(CONFIG_QCOM_LLCC_AMON) += llcc-amon.o
 obj-$(CONFIG_QPNP_PBS) += qpnp-pbs.o
 obj-$(CONFIG_QCOM_PM)	+=	spm.o
@@ -42,8 +43,7 @@
 obj-$(CONFIG_QTI_SYSTEM_PM) += system_pm.o
 obj-$(CONFIG_MSM_SERVICE_NOTIFIER) += service-notifier.o
 obj-$(CONFIG_MSM_SERVICE_LOCATOR) += service-locator.o
-obj-$(CONFIG_MSM_SMP2P) += msm_smp2p.o smp2p_debug.o smp2p_sleepstate.o
-obj-$(CONFIG_MSM_SMP2P_TEST) += smp2p_loopback.o smp2p_test.o smp2p_spinlock_test.o
+obj-$(CONFIG_MSM_SMP2P) += msm_smp2p.o smp2p_loopback.o smp2p_debug.o smp2p_sleepstate.o
 obj-$(CONFIG_MSM_IPC_ROUTER_SMD_XPRT) += ipc_router_smd_xprt.o
 obj-$(CONFIG_MSM_IPC_ROUTER_HSIC_XPRT) += ipc_router_hsic_xprt.o
 obj-$(CONFIG_MSM_IPC_ROUTER_MHI_XPRT) += ipc_router_mhi_xprt.o
diff --git a/drivers/soc/qcom/dcc_v2.c b/drivers/soc/qcom/dcc_v2.c
index 2a23ba7..e11efb0 100644
--- a/drivers/soc/qcom/dcc_v2.c
+++ b/drivers/soc/qcom/dcc_v2.c
@@ -148,11 +148,6 @@ struct dcc_drvdata {
 	struct class		*sram_class;
 	struct list_head	cfg_head[DCC_MAX_LINK_LIST];
 	uint32_t		nr_config[DCC_MAX_LINK_LIST];
-	void			*reg_buf;
-	struct msm_dump_data	reg_data;
-	bool			save_reg;
-	void			*sram_buf;
-	struct msm_dump_data	sram_data;
 	uint8_t			curr_list;
 	uint8_t			cti_trig;
 };
@@ -490,39 +485,6 @@ static int __dcc_ll_cfg(struct dcc_drvdata *drvdata, int curr_list)
 	return ret;
 }
 
-static void __dcc_reg_dump(struct dcc_drvdata *drvdata)
-{
-	uint32_t *reg_buf;
-	uint8_t i = 0;
-	uint8_t j;
-
-	if (!drvdata->reg_buf)
-		return;
-
-	drvdata->reg_data.version = DCC_REG_DUMP_VER;
-
-	reg_buf = drvdata->reg_buf;
-
-	reg_buf[i++] = dcc_readl(drvdata, DCC_HW_VERSION);
-	reg_buf[i++] = dcc_readl(drvdata, DCC_HW_INFO);
-	reg_buf[i++] = dcc_readl(drvdata, DCC_EXEC_CTRL);
-	reg_buf[i++] = dcc_readl(drvdata, DCC_STATUS);
-	reg_buf[i++] = dcc_readl(drvdata, DCC_CFG);
-	reg_buf[i++] = dcc_readl(drvdata, DCC_FDA_CURR);
-	reg_buf[i++] = dcc_readl(drvdata, DCC_LLA_CURR);
-
-	for (j = 0; j < DCC_MAX_LINK_LIST; j++)
-		reg_buf[i++] = dcc_readl(drvdata, DCC_LL_LOCK(j));
-	for (j = 0; j < DCC_MAX_LINK_LIST; j++)
-		reg_buf[i++] = dcc_readl(drvdata, DCC_LL_CFG(j));
-	for (j = 0; j < DCC_MAX_LINK_LIST; j++)
-		reg_buf[i++] = dcc_readl(drvdata, DCC_LL_BASE(j));
-	for (j = 0; j < DCC_MAX_LINK_LIST; j++)
-		reg_buf[i++] = dcc_readl(drvdata, DCC_FD_BASE(j));
-
-	drvdata->reg_data.magic = DCC_REG_DUMP_MAGIC_V2;
-}
-
 static void __dcc_first_crc(struct dcc_drvdata *drvdata)
 {
 	int i;
@@ -626,9 +588,6 @@ static int dcc_enable(struct dcc_drvdata *drvdata)
 					   DCC_LL_INT_ENABLE(list));
 		}
 	}
-	/* Save DCC registers */
-	if (drvdata->save_reg)
-		__dcc_reg_dump(drvdata);
 
 err:
 	mutex_unlock(&drvdata->mutex);
@@ -653,9 +612,6 @@ static void dcc_disable(struct dcc_drvdata *drvdata)
 	}
 	drvdata->ram_cfg = 0;
 	drvdata->ram_start = 0;
-	/* Save DCC registers */
-	if (drvdata->save_reg)
-		__dcc_reg_dump(drvdata);
 
 	mutex_unlock(&drvdata->mutex);
 }
@@ -1462,47 +1418,6 @@ static void dcc_sram_dev_exit(struct dcc_drvdata *drvdata)
 	dcc_sram_dev_deregister(drvdata);
 }
 
-static void dcc_allocate_dump_mem(struct dcc_drvdata *drvdata)
-{
-	int ret;
-	struct device *dev = drvdata->dev;
-	struct msm_dump_entry reg_dump_entry, sram_dump_entry;
-
-	/* Allocate memory for dcc reg dump */
-	drvdata->reg_buf = devm_kzalloc(dev, drvdata->reg_size, GFP_KERNEL);
-	if (drvdata->reg_buf) {
-		drvdata->reg_data.addr = virt_to_phys(drvdata->reg_buf);
-		drvdata->reg_data.len = drvdata->reg_size;
-		reg_dump_entry.id = MSM_DUMP_DATA_DCC_REG;
-		reg_dump_entry.addr = virt_to_phys(&drvdata->reg_data);
-		ret = msm_dump_data_register(MSM_DUMP_TABLE_APPS,
-					     &reg_dump_entry);
-		if (ret) {
-			dev_err(dev, "DCC REG dump setup failed\n");
-			devm_kfree(dev, drvdata->reg_buf);
-		}
-	} else {
-		dev_err(dev, "DCC REG dump allocation failed\n");
-	}
-
-	/* Allocate memory for dcc sram dump */
-	drvdata->sram_buf = devm_kzalloc(dev, drvdata->ram_size, GFP_KERNEL);
-	if (drvdata->sram_buf) {
-		drvdata->sram_data.addr = virt_to_phys(drvdata->sram_buf);
-		drvdata->sram_data.len = drvdata->ram_size;
-		sram_dump_entry.id = MSM_DUMP_DATA_DCC_SRAM;
-		sram_dump_entry.addr = virt_to_phys(&drvdata->sram_data);
-		ret = msm_dump_data_register(MSM_DUMP_TABLE_APPS,
-					     &sram_dump_entry);
-		if (ret) {
-			dev_err(dev, "DCC SRAM dump setup failed\n");
-			devm_kfree(dev, drvdata->sram_buf);
-		}
-	} else {
-		dev_err(dev, "DCC SRAM dump allocation failed\n");
-	}
-}
-
 static int dcc_probe(struct platform_device *pdev)
 {
 	int ret, i;
@@ -1542,9 +1457,6 @@ static int dcc_probe(struct platform_device *pdev)
 	if (ret)
 		return -EINVAL;
 
-	drvdata->save_reg = of_property_read_bool(pdev->dev.of_node,
-						  "qcom,save-reg");
-
 	mutex_init(&drvdata->mutex);
 
 	for (i = 0; i < DCC_MAX_LINK_LIST; i++) {
@@ -1580,7 +1492,6 @@ static int dcc_probe(struct platform_device *pdev)
 	if (ret)
 		goto err;
 
-	dcc_allocate_dump_mem(drvdata);
 	return 0;
 err:
 	return ret;
diff --git a/drivers/soc/qcom/eud.c b/drivers/soc/qcom/eud.c
index 51c08c6..f7f3317 100644
--- a/drivers/soc/qcom/eud.c
+++ b/drivers/soc/qcom/eud.c
@@ -27,6 +27,8 @@
 #include <linux/serial.h>
 #include <linux/workqueue.h>
 #include <linux/power_supply.h>
+#include <linux/clk.h>
+#include <linux/of.h>
 
 #define EUD_ENABLE_CMD 1
 #define EUD_DISABLE_CMD 0
@@ -71,6 +73,7 @@ struct eud_chip {
 	struct uart_port		port;
 	struct work_struct		eud_work;
 	struct power_supply		*batt_psy;
+	struct clk			*cfg_ahb_clk;
 };
 
 static const unsigned int eud_extcon_cable[] = {
@@ -119,7 +122,7 @@ static void enable_eud(struct platform_device *pdev)
 		/* write into CSR to enable EUD */
 		writel_relaxed(BIT(0), priv->eud_reg_base + EUD_REG_CSR_EUD_EN);
 		/* Enable vbus, chgr & safe mode warning interrupts */
-		writel_relaxed(EUD_INT_VBUS | EUD_INT_CHGR | EUD_INT_SAFE_MODE,
+		writel_relaxed(EUD_INT_VBUS | EUD_INT_CHGR,
 				priv->eud_reg_base + EUD_REG_INT1_EN_MASK);
 
 		/* Ensure Register Writes Complete */
@@ -448,7 +451,11 @@ static irqreturn_t handle_eud_irq(int irq, void *data)
 {
 	struct eud_chip *chip = data;
 	u32 reg;
-	u32 int_mask_en1 = readl_relaxed(chip->eud_reg_base +
+	u32 int_mask_en1;
+
+	clk_prepare_enable(chip->cfg_ahb_clk);
+
+	int_mask_en1 = readl_relaxed(chip->eud_reg_base +
 					EUD_REG_INT1_EN_MASK);
 
 	/* read status register and find out which interrupt triggered */
@@ -472,9 +479,11 @@ static irqreturn_t handle_eud_irq(int irq, void *data)
 		pet_eud(chip);
 	} else {
 		dev_dbg(chip->dev, "Unknown/spurious EUD Interrupt!\n");
+		clk_disable_unprepare(chip->cfg_ahb_clk);
 		return IRQ_NONE;
 	}
 
+	clk_disable_unprepare(chip->cfg_ahb_clk);
 	return IRQ_HANDLED;
 }
 
@@ -492,6 +501,7 @@ static int msm_eud_probe(struct platform_device *pdev)
 	}
 
 	platform_set_drvdata(pdev, chip);
+	chip->dev = &pdev->dev;
 
 	chip->extcon = devm_extcon_dev_allocate(&pdev->dev, eud_extcon_cable);
 	if (IS_ERR(chip->extcon)) {
@@ -517,10 +527,25 @@ static int msm_eud_probe(struct platform_device *pdev)
 	if (IS_ERR(chip->eud_reg_base))
 		return PTR_ERR(chip->eud_reg_base);
 
+	if (of_property_match_string(pdev->dev.of_node,
+				"clock-names", "cfg_ahb_clk") >= 0) {
+		chip->cfg_ahb_clk = devm_clk_get(&pdev->dev, "cfg_ahb_clk");
+		if (IS_ERR(chip->cfg_ahb_clk)) {
+			ret = PTR_ERR(chip->cfg_ahb_clk);
+			if (ret != -EPROBE_DEFER)
+				dev_err(chip->dev,
+				"clk get failed for cfg_ahb_clk ret %d\n",
+				ret);
+			return ret;
+		}
+	}
+
 	chip->eud_irq = platform_get_irq_byname(pdev, "eud_irq");
 
-	ret = devm_request_irq(&pdev->dev, chip->eud_irq, handle_eud_irq,
-				IRQF_TRIGGER_HIGH, "eud_irq", chip);
+	ret = devm_request_threaded_irq(&pdev->dev, chip->eud_irq,
+					NULL, handle_eud_irq,
+					IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+					"eud_irq", chip);
 	if (ret) {
 		dev_err(chip->dev, "request failed for eud irq\n");
 		return ret;
diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c
index 6ce481b..9742f7a 100644
--- a/drivers/soc/qcom/icnss.c
+++ b/drivers/soc/qcom/icnss.c
@@ -2636,7 +2636,10 @@ static int icnss_service_notifier_notify(struct notifier_block *nb,
 	clear_bit(ICNSS_HOST_TRIGGERED_PDR, &priv->state);
 
 	fw_down_data.crashed = event_data->crashed;
-	icnss_call_driver_uevent(priv, ICNSS_UEVENT_FW_DOWN, &fw_down_data);
+	if (test_bit(ICNSS_DRIVER_PROBED, &priv->state) &&
+	      !test_bit(ICNSS_PD_RESTART, &priv->state))
+		icnss_call_driver_uevent(priv, ICNSS_UEVENT_FW_DOWN,
+					 &fw_down_data);
 	icnss_driver_event_post(ICNSS_DRIVER_EVENT_PD_SERVICE_DOWN,
 				ICNSS_EVENT_SYNC, event_data);
 done:
diff --git a/drivers/soc/qcom/llcc_events.h b/drivers/soc/qcom/llcc_events.h
new file mode 100644
index 0000000..28f4594
--- /dev/null
+++ b/drivers/soc/qcom/llcc_events.h
@@ -0,0 +1,301 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SOC_QCOM_LLCC_EVENTS_H_
+#define _SOC_QCOM_LLCC_EVENTS_H_
+
+enum event_port_select {
+	EVENT_PORT_FEAC,
+	EVENT_PORT_FERC,
+	EVENT_PORT_FEWC,
+	EVENT_PORT_BEAC,
+	EVENT_PORT_BERC,
+	EVENT_PORT_TRP,
+	EVENT_PORT_DRP,
+	EVENT_PORT_PMGR,
+	EVENT_PORT_TENURE,
+	EVENT_PORT_TLAT,
+};
+
+enum feac_events {
+	FEAC_ANY_ACCESS,
+	FEAC_READ_INCR,
+	FEAC_WRITE_INCR,
+	FEAC_WRITE_ORDERED,
+	FEAC_READE_EXCL,
+	FEAC_WRITE_EXCL,
+	FEAC_CMO,
+	FEAC_CMO_CLEAN,
+	FEAC_CMO_INVAL,
+	FEAC_CMO_CLEANINVAL,
+	FEAC_CMO_DCPLD,
+	FEAC_READ_NOALLOC,
+	FEAC_WRITE_NOALLOC,
+	FEAC_PREFETCH,
+	FEAC_RD_BYTES,
+	FEAC_RD_BEATS,
+	FEAC_WR_BYTES,
+	FEAC_WR_BEATS,
+	FEAC_FC_READ,
+	FEAC_EWD_ACCESS,
+	FEAC_TCM_ACCESS,
+	FEAC_GM_HIT,
+	FEAC_GM_MISS,
+	FEAC_GM_UNAVAILABLE,
+	FEAC_XPU_ERROR,
+	FEAC_READ_HAZARD,
+	FEAC_WRITE_HAZARD,
+	FEAC_GRANULE_READ,
+	FEAC_GRANULE_WRITE,
+	FEAC_RIFB_ALLOC,
+	FEAC_WIFB_ALLOC,
+	FEAC_RIFB_DEALLOC,
+	FEAC_WIFB_DEALLOC,
+	FEAC_RESERVED,
+	FEAC_RESERVED1,
+	FEAC_FEAC2TRP_LP_TX,
+	FEAC_TRP_LP_BUSY,
+	FEAC_FEAC2TRP_HP_TX,
+	FEAC_TRP_HP_BUSY,
+	FEAC_FEAC2FEWC_TX,
+	FEAC_BEAC_LP_BUSY,
+	FEAC_BEAC_HP_BUSY,
+	FEAC_RIFB_FULL,
+	FEAC_WIFB_FULL,
+	FEAC_RD_CRDT_TX,
+	FEAC_WR_CRDT_TX,
+	FEAC_PROMOTION,
+	FEAC_FEAC2TRP_LP_PRESSURE,
+	FEAC_FEAC2TRP_HP_PRESSURE,
+	FEAC_FEAC2FEWC_PRESSURE,
+	FEAC_FEAC2BEAC_LP_PRESSURE,
+	FEAC_FEAC2BEAC_HP_PRESSURE,
+	FEAC_WR_THROUGH,
+};
+
+enum ferc_events {
+	FERC_BERC_CMD,
+	FERC_BERC_BEAT,
+	FERC_DRP_CMD,
+	FERC_DRP_BEAT,
+	FERC_RD_CTRL_RSP_TX,
+	FERC_WR_CTRL_RSP_TX,
+	FERC_RD_DATA_TX,
+	FERC_MISS_TRUMPS_HIT,
+	FERC_HIT_TRUMPS_WRSP,
+	FERC_RD_INTRA_RSP_IDLE,
+};
+
+enum fewc_events {
+	FEWC_WR_CMD,
+	FEWC_WR_DATA_BEAT,
+	FEWC_WR_LAST,
+	FEWC_WBUF_DEALLOC,
+	FEWC_WR_HIT,
+	FEWC_WR_MISS,
+	FEWC_NC_RMW,
+	FEWC_WR_DOWNGRADE,
+	FEWC_BEAC_WR_CMD,
+	FEWC_BEAC_WR_BEAT,
+	FEWC_BEAC_RD_CMD,
+	FEWC_BERC_FILL_BEAT,
+	FEWC_DRP_WR_CMD,
+	FEWC_DRP_WR_BEAT,
+	FEWC_DRP_RD_BEAT,
+	FEWC_TRP_TAG_LOOKUP,
+	FEWC_TRP_TAG_UPDATE,
+	FEWC_TRP_UNSTALL,
+	FEWC_WBUFFS_FULL,
+	FEWC_DRP_BUSY,
+	FEWC_BEAC_WR_BUSY,
+	FEWC_BEAC_RD_BUSY,
+	FEWC_TRP_TAG_LOOKUP_BUSY,
+	FEWC_TRP_TAG_UPDATE_BUSY,
+	FEWC_C_RMW,
+	FEWC_NC_ALLOC_RMW,
+	FEWC_NC_NO_ALLOC_RMW,
+	FEWC_NC_RMW_DEALLOC,
+	FEWC_C_RMW_DEALLOC,
+	FEWC_STALLED_BY_EVICT,
+};
+
+enum beac_events {
+	BEAC_RD_TX,
+	BEAC_WR_TX,
+	BEAC_RD_GRANULE,
+	BEAC_WR_GRANULE,
+	BEAC_WR_BEAT_TX,
+	BEAC_RD_CRDT_ZERO,
+	BEAC_WR_CRDT_ZERO,
+	BEAC_WDATA_CRDT_ZERO,
+	BEAC_IFCMD_CRDT_ZERO,
+	BEAC_IFWDATA_CRDT_ZERO,
+	BEAC_PCT_ENTRY_ALLOC,
+	BEAC_PCT_ENTRY_FREE,
+	BEAC_PCT_FULL,
+	BEAC_RD_PROMOTION_TX,
+	BEAC_WR_PROMOTION_TX,
+	BEAC_RD_PRESSURE_TX,
+	BEAC_WR_PRESSURE_TX,
+};
+
+enum berc_events {
+	BERC_RD_CMD,
+	BERC_ERROR_CMD,
+	BERC_PCT_ENTRY_DEALLOC,
+	BERC_RD_RSP_RX,
+	BERC_RD_RSP_BEAT_RX,
+	BERC_RD_LA_RX,
+	BERC_UNSTALL_RX,
+	BERC_TX_RD_CMD,
+	BERC_TX_ERR_CMD,
+	BERC_TX_RD_BEAT,
+	BERC_TX_ERR_BEAT,
+	BERC_RESERVED,
+	BERC_RESERVED1,
+	BERC_CMO_RX,
+	BERC_CMO_TX,
+	BERC_DRP_WR_TX,
+	BERC_DRP_WR_BEAT_TX,
+	BERC_FEWC_WR_TX,
+	BERC_FEWC_WR_BEAT_TX,
+	BERC_LBUFFS_FULL,
+	BERC_DRP_BUSY,
+	BERC_FEWC_BUSY,
+	BERC_LBUFF_STALLED,
+};
+
+enum trp_events {
+	TRP_ANY_ACCESS,
+	TRP_INCR_RD,
+	TRP_INCR_WR,
+	TRP_ANY_HIT,
+	TRP_RD_HIT,
+	TRP_WR_HIT,
+	TRP_RD_MISS,
+	TRP_WR_MISS,
+	TRP_RD_HIT_MISS,
+	TRP_WR_HIT_MISS,
+	TRP_EVICT,
+	TRP_GRANULE_EVICT,
+	TRP_RD_EVICT,
+	TRP_WR_EVICT,
+	TRP_LINE_FILL,
+	TRP_GRANULE_FILL,
+	TRP_WSC_WRITE,
+	TRP_WSC_EVICT,
+	TRP_SUBCACHE_ACT,
+	TRP_SUBCACHE_DEACT,
+	TRP_RD_DEACTIVE_SUBCACHE,
+	TRP_WR_DEACTIVE_SUBCACHE,
+	TRP_INVALID_LINE_ALLOC,
+	TRP_DEACTIVE_LINE_ALLOC,
+	TRP_SELF_EVICTION_ALLOC,
+	TRP_UC_SUBCACHE_ALLOC,
+	TRP_FC_SELF_EVICTION_ALLOC,
+	TRP_LP_SUBCACHE_VICTIM,
+	TRP_OC_SUBCACHE_VICTIM,
+	TRP_MRU_ROLLOVER,
+	TRP_NC_DOWNGRADE,
+	TRP_TAGRAM_CORR_ERR,
+	TRP_TAGRAM_UNCORR_ERR,
+	TRP_RD_MISS_FC,
+	TRP_CPU_WRITE_EWD_LINE,
+	TRP_CLIENT_WRITE_EWD_LINE,
+	TRP_CLIENT_READ_EWD_LINE,
+	TRP_CMO_I_EWD_LINE,
+	TRP_CMO_I_DIRTY_LINE,
+	TRP_DRP_RD_NOTIFICATION,
+	TRP_DRP_WR_NOTIFICATION,
+	TRP_LINEFILL_TAG_UPDATE,
+	TRP_FEWC_TAG_UPDATE,
+	TRP_ET_FULL,
+	TRP_NAWT_FULL,
+	TRP_HITQ_FULL,
+	TRP_ET_ALLOC,
+	TRP_ET_DEALLOC,
+	TRP_NAWT_ALLOC,
+	TRP_NAWT_DEALLOC,
+	TRP_RD_REPLAY,
+	TRP_WR_ECC_RD,
+	TRP_ET_LP_FULL,
+	TRP_ET_HP_FULL,
+	TRP_SOEH,
+};
+
+enum drp_events {
+	DRP_TRP_RD_NOTIFICATION,
+	DRP_TRP_WR_NOTIFICATION,
+	DRP_BIST_WR_NOTIFICATION,
+	DRP_DRIE_WR_NOTIFICATION,
+	DRP_ECC_CORR_ERR,
+	DRP_ECC_UNCORR_ERR,
+	DRP_FERC_RD_TX,
+	DRP_FEWC_RD_TX,
+	DRP_EVICT_LINE_TX,
+	DRP_EVICT_GRANULE_TX,
+	DRP_BIST_TX,
+	DRP_FERC_RD_BEAT,
+	DRP_FEWC_RD_BEAT,
+	DRP_BIST_RD_BEAT,
+	DRP_EVICT_RD_BEAT,
+	DRP_BERC_WR_BEAT,
+	DRP_FEWC_WR_BEAT,
+	DRP_BIST_WR_BEAT,
+	DRP_DRIE_WR_BEAT,
+	DRP_BERC_UNSTALL,
+	DRP_FEWC_UNSTALL,
+	DRP_LB_RD,
+	DRP_LB_WR,
+	DRP_BANK_CONFLICT,
+	DRP_FILL_TRUMPS_RD,
+	DRP_RD_TRUMPS_WR,
+	DRP_LB_SLP_RET,
+	DRP_LB_SLP_NRET,
+	DRP_LB_WAKEUP,
+	DRP_TRP_EARLY_WAKEUP,
+	DRP_PCB_IDLE,
+	DRP_EVICT_RDFIFO_FULL,
+	DRP_FEWC_RDFIFO_FULL,
+	DRP_FERC_RDFIFO_FULL,
+	DRP_FERC_RD,
+	DRP_FEWC_RD,
+	DRP_LINE_EVICT,
+	DRP_GRANULE_EVICT,
+	DRP_BIST_RD,
+	DRP_FEWC_WR,
+	DRP_LINE_FILL,
+	DRP_GRANULE_FILL,
+	DRP_BIST_WR,
+	DRP_DRIE_WR,
+};
+
+enum pmgr_events {
+	PMGR_Q_RUN_STATE,
+	PMGR_Q_DENIED_STATE,
+	PMGR_Q_STOPEED_TO_Q_RUN,
+	PMGR_Q_RUN_TO_Q_FENCED,
+	PMGR_Q_RUN_TO_Q_DENIED,
+	PMGR_Q_DENIED_TO_Q_RUN,
+	PMGR_Q_FENCED_TO_Q_STOPPED,
+	PMGR_Q_FENCED_TO_Q_DENIED,
+};
+
+enum filter_type {
+	SCID,
+	MID,
+	PROFILING_TAG,
+	WAY_ID,
+	UNKNOWN,
+};
+
+#endif /* _SOC_QCOM_LLCC_EVENTS_H_ */
diff --git a/drivers/soc/qcom/llcc_perfmon.c b/drivers/soc/qcom/llcc_perfmon.c
new file mode 100644
index 0000000..39276a9
--- /dev/null
+++ b/drivers/soc/qcom/llcc_perfmon.c
@@ -0,0 +1,1203 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/mutex.h>
+#include <linux/bitmap.h>
+#include <linux/bitops.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/hrtimer.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include <linux/soc/qcom/llcc-qcom.h>
+#include <linux/module.h>
+#include "llcc_events.h"
+#include "llcc_perfmon.h"
+
+#define LLCC_PERFMON_NAME		"llcc_perfmon"
+#define LLCC_PERFMON_COUNTER_MAX	16
+#define MAX_NUMBER_OF_PORTS		8
+#define NUM_CHANNELS			16
+#define MAX_STRING_SIZE			20
+#define DELIM_CHAR			" "
+
+/**
+ * struct llcc_perfmon_counter_map	- llcc perfmon counter map info
+ * @port_sel:		Port selected for configured counter
+ * @event_sel:		Event selected for configured counter
+ * @counter_dump:	Cumulative counter dump, accumulated on each
+ *			periodic dump and zeroed when shown via sysfs
+ */
+struct llcc_perfmon_counter_map {
+	unsigned int port_sel;
+	unsigned int event_sel;
+	unsigned long long counter_dump;
+};
+
+struct llcc_perfmon_private;
+/**
+ * struct event_port_ops		- event port operation
+ * @event_config:		Counter config support for port &  event
+ *				(mandatory - called unconditionally)
+ * @event_enable:		Counter enable support for port
+ *				(optional - callers NULL-check it)
+ * @event_filter_config:	Port filter config support
+ *				(optional - callers NULL-check it)
+ */
+struct event_port_ops {
+	void (*event_config)(struct llcc_perfmon_private *,
+			unsigned int, unsigned int, bool);
+	void (*event_enable)(struct llcc_perfmon_private *, bool);
+	void (*event_filter_config)(struct llcc_perfmon_private *,
+			enum filter_type, unsigned long, bool);
+};
+
+/**
+ * struct llcc_perfmon_private	- llcc perfmon private
+ * @llcc_map:		llcc register address space map
+ * @broadcast_off:	Offset of llcc broadcast address space
+ * @bank_off:		Offset of llcc banks
+ * @num_banks:		Number of banks supported
+ * @port_ops:		struct event_port_ops
+ * @configured:		Mapping of configured event counters
+ * @configured_counters:
+ *			Count of configured counters.
+ * @enables_port:	Port enabled for perfmon configuration
+ * @filtered_ports:	Port filter enabled
+ * @port_configd:	Number of perfmon port configuration supported
+ * @mutex:		mutex to protect this structure
+ * @hrtimer:		hrtimer instance for timer functionality
+ * @expires:		timer expire time in nano seconds; zero means
+ *			periodic dump mode is disabled
+ */
+struct llcc_perfmon_private {
+	struct regmap *llcc_map;
+	unsigned int broadcast_off;
+	unsigned int bank_off[NUM_CHANNELS];
+	unsigned int num_banks;
+	struct event_port_ops *port_ops[MAX_NUMBER_OF_PORTS];
+	struct llcc_perfmon_counter_map configured[LLCC_PERFMON_COUNTER_MAX];
+	unsigned int configured_counters;
+	unsigned int enables_port;
+	unsigned int filtered_ports;
+	unsigned int port_configd;
+	struct mutex mutex;
+	struct hrtimer hrtimer;
+	ktime_t expires;
+};
+
+/* Write @val to @offset within the LLCC broadcast register region. */
+static inline void llcc_bcast_write(struct llcc_perfmon_private *llcc_priv,
+			unsigned int offset, uint32_t val)
+{
+	regmap_write(llcc_priv->llcc_map, llcc_priv->broadcast_off + offset,
+			val);
+}
+
+/* Read @offset within the LLCC broadcast register region into @val. */
+static inline void llcc_bcast_read(struct llcc_perfmon_private *llcc_priv,
+		unsigned int offset, uint32_t *val)
+{
+	regmap_read(llcc_priv->llcc_map, llcc_priv->broadcast_off + offset,
+			val);
+}
+
+/* Read-modify-write the broadcast register at @offset: bits in @mask
+ * are replaced by the corresponding bits of @val.
+ */
+static void llcc_bcast_modify(struct llcc_perfmon_private *llcc_priv,
+		unsigned int offset, uint32_t val, uint32_t mask)
+{
+	uint32_t readval;
+
+	llcc_bcast_read(llcc_priv, offset, &readval);
+	readval &= ~mask;
+	readval |= val & mask;
+	llcc_bcast_write(llcc_priv, offset, readval);
+}
+
+/* Trigger a broadcast monitor dump and accumulate every configured
+ * counter (the last configured slot is the cycle counter) across all
+ * LLCC banks into the cumulative counter_dump totals.
+ */
+static void perfmon_counter_dump(struct llcc_perfmon_private *llcc_priv)
+{
+	uint32_t val;
+	unsigned int i, j;
+	unsigned long long total;
+
+	/* configured_counters is unsigned, so the former
+	 * "configured_counters - 1" loop bound underflowed when nothing
+	 * was configured (the hrtimer handler calls in here unguarded)
+	 * and read far past configured[].  Bail out instead.
+	 */
+	if (!llcc_priv->configured_counters)
+		return;
+
+	llcc_bcast_write(llcc_priv, PERFMON_DUMP, MONITOR_DUMP);
+	/* The final iteration (the cycle counter) is identical to the
+	 * event-counter iterations, so a single loop covers all slots.
+	 */
+	for (i = 0; i < llcc_priv->configured_counters; i++) {
+		total = 0;
+		for (j = 0; j < llcc_priv->num_banks; j++) {
+			regmap_read(llcc_priv->llcc_map, llcc_priv->bank_off[j]
+					+ LLCC_COUNTER_n_VALUE(i), &val);
+			total += val;
+		}
+
+		llcc_priv->configured[i].counter_dump += total;
+	}
+}
+
+/* sysfs show: print every configured counter.  In periodic-dump mode
+ * (expires set) the accumulated totals are printed and reset; otherwise
+ * a fresh hardware dump is taken and per-bank values are printed.
+ */
+static ssize_t perfmon_counter_dump_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct llcc_perfmon_private *llcc_priv = dev_get_drvdata(dev);
+	uint32_t val;
+	unsigned int i, j;
+	unsigned long long total;
+	ssize_t cnt = 0, print;
+
+	if (llcc_priv->configured_counters == 0) {
+		pr_err("counters not configured\n");
+		return cnt;
+	}
+
+	if (llcc_priv->expires.tv64) {
+		/* periodic mode: fold in the latest hardware values, then
+		 * report and clear the accumulated totals
+		 */
+		perfmon_counter_dump(llcc_priv);
+		for (i = 0; i < llcc_priv->configured_counters - 1; i++) {
+			print = snprintf(buf, MAX_STRING_SIZE, "Port %02d,",
+					llcc_priv->configured[i].port_sel);
+			buf += print;
+			cnt += print;
+			print = snprintf(buf, MAX_STRING_SIZE, "Event %02d,",
+					llcc_priv->configured[i].event_sel);
+			buf += print;
+			cnt += print;
+
+			print = snprintf(buf, MAX_STRING_SIZE, "0x%016llx\n",
+				       llcc_priv->configured[i].counter_dump);
+			buf += print;
+			cnt += print;
+			llcc_priv->configured[i].counter_dump = 0;
+		}
+
+		/* last configured slot is always the cycle counter */
+		print = snprintf(buf, MAX_STRING_SIZE, "CYCLE COUNT, ,");
+		buf += print;
+		cnt += print;
+		print = snprintf(buf, MAX_STRING_SIZE, "0x%016llx\n",
+			llcc_priv->configured[i].counter_dump);
+		buf += print;
+		cnt += print;
+		llcc_priv->configured[i].counter_dump = 0;
+		hrtimer_forward_now(&llcc_priv->hrtimer, llcc_priv->expires);
+	} else {
+		/* one-shot mode: dump now and print each bank's raw value
+		 * followed by the per-counter total
+		 */
+		llcc_bcast_write(llcc_priv, PERFMON_DUMP, MONITOR_DUMP);
+
+		for (i = 0; i < llcc_priv->configured_counters - 1; i++) {
+			print = snprintf(buf, MAX_STRING_SIZE, "Port %02d,",
+					llcc_priv->configured[i].port_sel);
+			buf += print;
+			cnt += print;
+			print = snprintf(buf, MAX_STRING_SIZE, "Event %02d,",
+					llcc_priv->configured[i].event_sel);
+			buf += print;
+			cnt += print;
+			total = 0;
+			for (j = 0; j < llcc_priv->num_banks; j++) {
+				regmap_read(llcc_priv->llcc_map,
+						llcc_priv->bank_off[j]
+						+ LLCC_COUNTER_n_VALUE(i),
+						&val);
+				print = snprintf(buf, MAX_STRING_SIZE,
+						"0x%08x,", val);
+				buf += print;
+				cnt += print;
+				total += val;
+			}
+
+			print = snprintf(buf, MAX_STRING_SIZE, "0x%09llx\n",
+					total);
+			buf += print;
+			cnt += print;
+		}
+
+		print = snprintf(buf, MAX_STRING_SIZE, "CYCLE COUNT, ,");
+		buf += print;
+		cnt += print;
+		total = 0;
+		for (j = 0; j < llcc_priv->num_banks; j++) {
+			regmap_read(llcc_priv->llcc_map,
+					llcc_priv->bank_off[j] +
+					LLCC_COUNTER_n_VALUE(i), &val);
+			print = snprintf(buf, MAX_STRING_SIZE, "0x%08x,", val);
+			buf += print;
+			cnt += print;
+			total += val;
+		}
+
+		print = snprintf(buf, MAX_STRING_SIZE, "0x%09llx\n", total);
+		buf += print;
+		cnt += print;
+	}
+
+	return cnt;
+}
+
+/* sysfs store: parse space-separated "<port> <event>" pairs and program
+ * each on the next free perfmon counter; the final counter slot is
+ * reserved for the clock/cycle counter.
+ */
+static ssize_t perfmon_configure_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct llcc_perfmon_private *llcc_priv = dev_get_drvdata(dev);
+	struct event_port_ops *port_ops;
+	unsigned int j;
+	unsigned long port_sel, event_sel;
+	uint32_t val;
+	char *token, *delim = DELIM_CHAR;
+
+	mutex_lock(&llcc_priv->mutex);
+	if (llcc_priv->configured_counters) {
+		pr_err("Counters configured already, remove & try again\n");
+		mutex_unlock(&llcc_priv->mutex);
+		return -EINVAL;
+	}
+
+	llcc_priv->configured_counters = 0;
+	j = 0;
+	token = strsep((char **)&buf, delim);
+
+	while (token != NULL) {
+		if (kstrtoul(token, 10, &port_sel))
+			break;
+
+		if (port_sel >= llcc_priv->port_configd)
+			break;
+
+		token = strsep((char **)&buf, delim);
+		if (token == NULL)
+			break;
+
+		if (kstrtoul(token, 10, &event_sel))
+			break;
+
+		/* advance past the event token before validating, so a bad
+		 * event skips the pair rather than desynchronizing parsing
+		 */
+		token = strsep((char **)&buf, delim);
+		if (event_sel >= EVENT_NUM_MAX) {
+			pr_err("unsupported event num %ld\n", event_sel);
+			continue;
+		}
+
+		llcc_priv->configured[j].port_sel = port_sel;
+		llcc_priv->configured[j].event_sel = event_sel;
+		port_ops = llcc_priv->port_ops[port_sel];
+		pr_info("configured event %ld counter %d on port %ld\n",
+				event_sel, j, port_sel);
+		port_ops->event_config(llcc_priv, event_sel, j++, true);
+		if (!(llcc_priv->enables_port & (1 << port_sel)))
+			if (port_ops->event_enable)
+				port_ops->event_enable(llcc_priv, true);
+
+		llcc_priv->enables_port |= (1 << port_sel);
+
+		/* Last perfmon counter for cycle counter */
+		if (llcc_priv->configured_counters++ ==
+				(LLCC_PERFMON_COUNTER_MAX - 2))
+			break;
+	}
+
+	/* configure clock event */
+	val = COUNT_CLOCK_EVENT | CLEAR_ON_ENABLE | CLEAR_ON_DUMP;
+	llcc_bcast_write(llcc_priv, PERFMON_COUNTER_n_CONFIG(j), val);
+
+	llcc_priv->configured_counters++;
+	mutex_unlock(&llcc_priv->mutex);
+	return count;
+}
+
+/* sysfs store: parse "<port> <event>" pairs and deconfigure them,
+ * mirroring perfmon_configure_store; finally removes the clock event
+ * and clears the configured-counter count.
+ */
+static ssize_t perfmon_remove_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct llcc_perfmon_private *llcc_priv = dev_get_drvdata(dev);
+	struct event_port_ops *port_ops;
+	unsigned int j, counter_remove = 0;
+	unsigned long port_sel, event_sel;
+	char *token, *delim = DELIM_CHAR;
+
+	mutex_lock(&llcc_priv->mutex);
+	if (!llcc_priv->configured_counters) {
+		pr_err("Counters not configured\n");
+		mutex_unlock(&llcc_priv->mutex);
+		return -EINVAL;
+	}
+
+	j = 0;
+	token = strsep((char **)&buf, delim);
+
+	while (token != NULL) {
+		if (kstrtoul(token, 10, &port_sel))
+			break;
+
+		if (port_sel >= llcc_priv->port_configd)
+			break;
+
+		token = strsep((char **)&buf, delim);
+		if (token == NULL)
+			break;
+
+		if (kstrtoul(token, 10, &event_sel))
+			break;
+
+		token = strsep((char **)&buf, delim);
+		if (event_sel >= EVENT_NUM_MAX) {
+			pr_err("unsupported event num %ld\n", event_sel);
+			continue;
+		}
+
+		/* put dummy values */
+		llcc_priv->configured[j].port_sel = MAX_NUMBER_OF_PORTS;
+		llcc_priv->configured[j].event_sel = 100;
+		port_ops = llcc_priv->port_ops[port_sel];
+		pr_info("Removed event %ld counter %d from port %ld\n",
+				event_sel, j, port_sel);
+
+		port_ops->event_config(llcc_priv, event_sel, j++, false);
+		if (llcc_priv->enables_port & (1 << port_sel))
+			if (port_ops->event_enable)
+				port_ops->event_enable(llcc_priv, false);
+
+		llcc_priv->enables_port &= ~(1 << port_sel);
+
+		/* Last perfmon counter for cycle counter */
+		if (counter_remove++ == (LLCC_PERFMON_COUNTER_MAX - 2))
+			break;
+	}
+
+	/* remove clock event */
+	llcc_bcast_write(llcc_priv, PERFMON_COUNTER_n_CONFIG(j), 0);
+
+	llcc_priv->configured_counters = 0;
+	mutex_unlock(&llcc_priv->mutex);
+	return count;
+}
+
+/* Map a user-supplied filter keyword to its filter_type; returns
+ * UNKNOWN for anything unrecognized.
+ */
+static enum filter_type find_filter_type(char *filter)
+{
+	static const struct {
+		const char *name;
+		enum filter_type type;
+	} filter_map[] = {
+		{ "SCID", SCID },
+		{ "MID", MID },
+		{ "PROFILING_TAG", PROFILING_TAG },
+		{ "WAY_ID", WAY_ID },
+	};
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(filter_map); i++)
+		if (!strcmp(filter, filter_map[i].name))
+			return filter_map[i].type;
+
+	return UNKNOWN;
+}
+
+/* sysfs store: "<FILTER> <match value> <port> [<port> ...]" - enables
+ * the given filter with @val on each listed port.
+ */
+static ssize_t perfmon_filter_config_store(struct device *dev,
+		struct device_attribute *attr, const char *buf,
+		size_t count)
+{
+	struct llcc_perfmon_private *llcc_priv = dev_get_drvdata(dev);
+	unsigned long port, val;
+	struct event_port_ops *port_ops;
+	char *token, *delim = DELIM_CHAR;
+	enum filter_type filter = UNKNOWN;
+
+	mutex_lock(&llcc_priv->mutex);
+
+	token = strsep((char **)&buf, delim);
+	if (token != NULL)
+		filter = find_filter_type(token);
+
+	if (filter == UNKNOWN) {
+		pr_err("filter configuration failed, Unsupported filter\n");
+		goto filter_config_free;
+	}
+
+	token = strsep((char **)&buf, delim);
+	if (token == NULL) {
+		pr_err("filter configuration failed, Wrong input\n");
+		goto filter_config_free;
+	}
+
+	if (kstrtoul(token, 10, &val)) {
+		pr_err("filter configuration failed, Wrong input\n");
+		goto filter_config_free;
+	}
+
+	if ((filter == SCID) && (val >= SCID_MAX)) {
+		pr_err("filter configuration failed, SCID above MAX value\n");
+		goto filter_config_free;
+	}
+
+	while (token != NULL) {
+		token = strsep((char **)&buf, delim);
+		if (token == NULL)
+			break;
+
+		if (kstrtoul(token, 10, &port))
+			break;
+
+		/* Bounds-check the user-supplied port before indexing
+		 * port_ops[]; the unchecked value read past the array.
+		 */
+		if (port >= llcc_priv->port_configd) {
+			pr_err("filter configuration failed, invalid port\n");
+			break;
+		}
+
+		llcc_priv->filtered_ports |= 1 << port;
+		port_ops = llcc_priv->port_ops[port];
+		if (port_ops->event_filter_config)
+			port_ops->event_filter_config(llcc_priv, filter, val,
+					true);
+	}
+
+	mutex_unlock(&llcc_priv->mutex);
+	return count;
+
+filter_config_free:
+	mutex_unlock(&llcc_priv->mutex);
+	return -EINVAL;
+}
+
+/* sysfs store: "<FILTER> <match value> <port> [<port> ...]" - disables
+ * the given filter on each listed port (inverse of filter_config).
+ */
+static ssize_t perfmon_filter_remove_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct llcc_perfmon_private *llcc_priv = dev_get_drvdata(dev);
+	struct event_port_ops *port_ops;
+	unsigned long port, val;
+	char *token, *delim = DELIM_CHAR;
+	enum filter_type filter = UNKNOWN;
+
+	mutex_lock(&llcc_priv->mutex);
+	token = strsep((char **)&buf, delim);
+	if (token != NULL)
+		filter = find_filter_type(token);
+
+	if (filter == UNKNOWN) {
+		pr_err("filter configuration failed, Unsupported filter\n");
+		goto filter_remove_free;
+	}
+
+	token = strsep((char **)&buf, delim);
+	if (token == NULL) {
+		pr_err("filter configuration failed, Wrong input\n");
+		goto filter_remove_free;
+	}
+
+	if (kstrtoul(token, 10, &val)) {
+		pr_err("filter configuration failed, Wrong input\n");
+		goto filter_remove_free;
+	}
+
+	if ((filter == SCID) && (val >= SCID_MAX)) {
+		pr_err("filter configuration failed, SCID above MAX value\n");
+		goto filter_remove_free;
+	}
+
+	while (token != NULL) {
+		token = strsep((char **)&buf, delim);
+		if (token == NULL)
+			break;
+
+		if (kstrtoul(token, 10, &port))
+			break;
+
+		/* Bounds-check the user-supplied port before indexing
+		 * port_ops[]; the unchecked value read past the array.
+		 */
+		if (port >= llcc_priv->port_configd) {
+			pr_err("filter remove failed, invalid port\n");
+			break;
+		}
+
+		llcc_priv->filtered_ports &= ~(1 << port);
+		port_ops = llcc_priv->port_ops[port];
+		if (port_ops->event_filter_config)
+			port_ops->event_filter_config(llcc_priv, filter, val,
+					false);
+	}
+
+	mutex_unlock(&llcc_priv->mutex);
+	return count;
+
+filter_remove_free:
+	mutex_unlock(&llcc_priv->mutex);
+	/* report failure like the config path did; returning count here
+	 * made every parse error look like success to userspace
+	 */
+	return -EINVAL;
+}
+
+/* sysfs store: write non-zero to start monitoring (manual mode), zero
+ * to stop; manages the periodic-dump hrtimer alongside.
+ */
+static ssize_t perfmon_start_store(struct device *dev,
+		struct device_attribute *attr, const char *buf,
+		size_t count)
+{
+	struct llcc_perfmon_private *llcc_priv = dev_get_drvdata(dev);
+	uint32_t val = 0, mask;
+	unsigned long start;
+
+	if (kstrtoul(buf, 10, &start))
+		return -EINVAL;
+
+	mutex_lock(&llcc_priv->mutex);
+	if (start) {
+		/* previously only a pr_err was issued and the monitor was
+		 * enabled anyway; refuse to start with nothing configured
+		 */
+		if (!llcc_priv->configured_counters) {
+			pr_err("start failed. perfmon not configured\n");
+			mutex_unlock(&llcc_priv->mutex);
+			return -EINVAL;
+		}
+
+		val = MANUAL_MODE | MONITOR_EN;
+		if (llcc_priv->expires.tv64) {
+			if (hrtimer_is_queued(&llcc_priv->hrtimer))
+				hrtimer_forward_now(&llcc_priv->hrtimer,
+						llcc_priv->expires);
+			else
+				hrtimer_start(&llcc_priv->hrtimer,
+						llcc_priv->expires,
+						HRTIMER_MODE_REL_PINNED);
+		}
+	} else {
+		if (llcc_priv->expires.tv64)
+			hrtimer_cancel(&llcc_priv->hrtimer);
+
+		if (!llcc_priv->configured_counters)
+			pr_err("stop failed. perfmon not configured\n");
+	}
+
+	mask = PERFMON_MODE_MONITOR_MODE_MASK | PERFMON_MODE_MONITOR_EN_MASK;
+	llcc_bcast_modify(llcc_priv, PERFMON_MODE, val, mask);
+
+	mutex_unlock(&llcc_priv->mutex);
+	return count;
+}
+
+/* sysfs store: set the periodic dump interval in nanoseconds; zero
+ * cancels the timer and disables periodic mode.
+ */
+static ssize_t perfmon_ns_periodic_dump_store(struct device *dev,
+		struct device_attribute *attr, const char *buf,
+		size_t count)
+{
+	struct llcc_perfmon_private *llcc_priv = dev_get_drvdata(dev);
+	s64 period_ns;
+
+	if (kstrtos64(buf, 10, &period_ns))
+		return -EINVAL;
+
+	mutex_lock(&llcc_priv->mutex);
+	/* publish the new period only under the mutex; the previous code
+	 * wrote llcc_priv->expires before taking the lock, racing with
+	 * the start/dump paths that read it
+	 */
+	llcc_priv->expires.tv64 = period_ns;
+	if (!llcc_priv->expires.tv64) {
+		hrtimer_cancel(&llcc_priv->hrtimer);
+		mutex_unlock(&llcc_priv->mutex);
+		return count;
+	}
+
+	if (hrtimer_is_queued(&llcc_priv->hrtimer))
+		hrtimer_forward_now(&llcc_priv->hrtimer, llcc_priv->expires);
+	else
+		hrtimer_start(&llcc_priv->hrtimer, llcc_priv->expires,
+			      HRTIMER_MODE_REL_PINNED);
+
+	mutex_unlock(&llcc_priv->mutex);
+	return count;
+}
+
+/* sysfs show: report ACTIVE/DEACTIVE state and current capacity for
+ * every SCID from the TRP status registers.
+ */
+static ssize_t perfmon_scid_status_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct llcc_perfmon_private *llcc_priv = dev_get_drvdata(dev);
+	uint32_t val;
+	unsigned int i, offset;
+	ssize_t cnt = 0, print;
+
+	for (i = 0; i < SCID_MAX; i++) {
+		offset = TRP_SCID_n_STATUS(i);
+		llcc_bcast_read(llcc_priv, offset, &val);
+		if (val & TRP_SCID_STATUS_ACTIVE_MASK)
+			print = snprintf(buf, MAX_STRING_SIZE, "SCID %02d %10s",
+					i, "ACTIVE");
+		else
+			print = snprintf(buf, MAX_STRING_SIZE, "SCID %02d %10s",
+					i, "DEACTIVE");
+
+		buf += print;
+		cnt += print;
+
+		val = (val & TRP_SCID_STATUS_CURRENT_CAP_MASK)
+			>> TRP_SCID_STATUS_CURRENT_CAP_SHIFT;
+		print = snprintf(buf, MAX_STRING_SIZE, ",0x%08x\n", val);
+		buf += print;
+		cnt += print;
+	}
+
+	return cnt;
+}
+
+/* sysfs interface: one attribute per perfmon operation, collected into
+ * a group created at probe time.
+ */
+static DEVICE_ATTR_RO(perfmon_counter_dump);
+static DEVICE_ATTR_WO(perfmon_configure);
+static DEVICE_ATTR_WO(perfmon_remove);
+static DEVICE_ATTR_WO(perfmon_filter_config);
+static DEVICE_ATTR_WO(perfmon_filter_remove);
+static DEVICE_ATTR_WO(perfmon_start);
+static DEVICE_ATTR_RO(perfmon_scid_status);
+static DEVICE_ATTR_WO(perfmon_ns_periodic_dump);
+
+static struct attribute *llcc_perfmon_attrs[] = {
+	&dev_attr_perfmon_counter_dump.attr,
+	&dev_attr_perfmon_configure.attr,
+	&dev_attr_perfmon_remove.attr,
+	&dev_attr_perfmon_filter_config.attr,
+	&dev_attr_perfmon_filter_remove.attr,
+	&dev_attr_perfmon_start.attr,
+	&dev_attr_perfmon_scid_status.attr,
+	&dev_attr_perfmon_ns_periodic_dump.attr,
+	NULL,
+};
+
+static struct attribute_group llcc_perfmon_group = {
+	.attrs	= llcc_perfmon_attrs,
+};
+
+/* Bind perfmon counter @event_counter_num to @port, with clear-on-enable
+ * and clear-on-dump behavior.
+ */
+static void perfmon_counter_config(struct llcc_perfmon_private *llcc_priv,
+		unsigned int port, unsigned int event_counter_num)
+{
+	uint32_t val;
+
+	val = (port & PERFMON_PORT_SELECT_MASK) |
+		((event_counter_num << EVENT_SELECT_SHIFT) &
+		PERFMON_EVENT_SELECT_MASK) | CLEAR_ON_ENABLE | CLEAR_ON_DUMP;
+	llcc_bcast_write(llcc_priv, PERFMON_COUNTER_n_CONFIG(event_counter_num),
+			val);
+}
+
+/* Configure (or clear, when !@enable) a FEAC profiling event on the
+ * given perfmon counter, applying FILTER_0 if the port is filtered.
+ */
+static void feac_event_config(struct llcc_perfmon_private *llcc_priv,
+		unsigned int event_type, unsigned int event_counter_num,
+		bool enable)
+{
+	uint32_t val = 0, mask, counter_num = 0;
+
+	mask = EVENT_SEL_MASK;
+	if (llcc_priv->filtered_ports & (1 << EVENT_PORT_FEAC))
+		mask |= FILTER_SEL_MASK | FILTER_EN_MASK;
+
+	if (enable) {
+		val = (event_type << EVENT_SEL_SHIFT) & EVENT_SEL_MASK;
+		if (llcc_priv->filtered_ports & (1 << EVENT_PORT_FEAC))
+			val |= (FILTER_0 << FILTER_SEL_SHIFT) | FILTER_EN;
+
+		counter_num = event_counter_num;
+	}
+
+	llcc_bcast_modify(llcc_priv, FEAC_PROF_EVENT_n_CFG(event_counter_num),
+			val, mask);
+	perfmon_counter_config(llcc_priv, EVENT_PORT_FEAC, counter_num);
+}
+
+/* Enable/disable FEAC profiling with byte+beat scaling; also routes the
+ * scaling through FILTER_0 when the FEAC port is filtered.
+ */
+static void feac_event_enable(struct llcc_perfmon_private *llcc_priv,
+		bool enable)
+{
+	uint32_t val = 0, mask;
+
+	if (enable) {
+		val = (BYTE_SCALING << BYTE_SCALING_SHIFT) |
+			(BEAT_SCALING << BEAT_SCALING_SHIFT) | PROF_EN;
+
+		if (llcc_priv->filtered_ports & (1 << EVENT_PORT_FEAC))
+			val |= (FILTER_0 << FEAC_SCALING_FILTER_SEL_SHIFT) |
+			       FEAC_SCALING_FILTER_EN;
+	}
+
+	mask = PROF_CFG_BEAT_SCALING_MASK | PROF_CFG_BYTE_SCALING_MASK
+		| PROF_CFG_EN_MASK;
+
+	if (llcc_priv->filtered_ports & (1 << EVENT_PORT_FEAC))
+		mask |= FEAC_SCALING_FILTER_SEL_MASK |
+			FEAC_SCALING_FILTER_EN_MASK;
+
+	llcc_bcast_modify(llcc_priv, FEAC_PROF_CFG, val, mask);
+}
+
+/* Program FEAC FILTER_0 match/mask for SCID or MID filtering; other
+ * filter types are not supported by this port.
+ */
+static void feac_event_filter_config(struct llcc_perfmon_private *llcc_priv,
+		enum filter_type filter, unsigned long match, bool enable)
+{
+	uint32_t val = 0, mask;
+
+	if (filter == SCID) {
+		if (enable)
+			val = (match << SCID_MATCH_SHIFT)
+				| SCID_MASK_MASK;
+
+		mask = SCID_MATCH_MASK | SCID_MASK_MASK;
+		llcc_bcast_modify(llcc_priv, FEAC_PROF_FILTER_0_CFG6, val,
+				mask);
+	} else if (filter == MID) {
+		if (enable)
+			val = (match << MID_MATCH_SHIFT)
+				| MID_MASK_MASK;
+
+		mask = MID_MATCH_MASK | MID_MASK_MASK;
+		llcc_bcast_modify(llcc_priv, FEAC_PROF_FILTER_0_CFG5, val,
+				mask);
+	} else {
+		pr_err("unknown filter/not supported\n");
+	}
+}
+
+/* FEAC port: supports config, enable and SCID/MID filtering. */
+static struct event_port_ops feac_port_ops = {
+	.event_config	= feac_event_config,
+	.event_enable	= feac_event_enable,
+	.event_filter_config	= feac_event_filter_config,
+};
+
+/* Configure (or clear, when !@enable) a FERC profiling event on the
+ * given perfmon counter, applying FILTER_0 if the port is filtered.
+ */
+static void ferc_event_config(struct llcc_perfmon_private *llcc_priv,
+		unsigned int event_type, unsigned int event_counter_num,
+		bool enable)
+{
+	uint32_t val = 0, mask, counter_num = 0;
+
+	mask = EVENT_SEL_MASK;
+	if (llcc_priv->filtered_ports & (1 << EVENT_PORT_FERC))
+		mask |= FILTER_SEL_MASK | FILTER_EN_MASK;
+
+	if (enable) {
+		/* mask the event select like every other port's config so
+		 * an oversized event id cannot spill into the filter bits
+		 */
+		val = (event_type << EVENT_SEL_SHIFT) & EVENT_SEL_MASK;
+		if (llcc_priv->filtered_ports & (1 << EVENT_PORT_FERC))
+			val |= (FILTER_0 << FILTER_SEL_SHIFT) | FILTER_EN;
+
+		counter_num = event_counter_num;
+	}
+
+	llcc_bcast_modify(llcc_priv, FERC_PROF_EVENT_n_CFG(event_counter_num),
+			val, mask);
+	perfmon_counter_config(llcc_priv, EVENT_PORT_FERC, counter_num);
+}
+
+/* Enable/disable FERC profiling with byte+beat scaling. */
+static void ferc_event_enable(struct llcc_perfmon_private *llcc_priv,
+		bool enable)
+{
+	uint32_t val = 0, mask;
+
+	if (enable)
+		val = (BYTE_SCALING << BYTE_SCALING_SHIFT) |
+			(BEAT_SCALING << BEAT_SCALING_SHIFT) | PROF_EN;
+
+	mask = PROF_CFG_BEAT_SCALING_MASK | PROF_CFG_BYTE_SCALING_MASK
+		| PROF_CFG_EN_MASK;
+	llcc_bcast_modify(llcc_priv, FERC_PROF_CFG, val, mask);
+}
+
+/* Program FERC FILTER_0 profiling-tag match/mask; only PROFILING_TAG is
+ * supported on this port.
+ */
+static void ferc_event_filter_config(struct llcc_perfmon_private *llcc_priv,
+		enum filter_type filter, unsigned long match, bool enable)
+{
+	uint32_t val = 0, mask;
+
+	if (filter != PROFILING_TAG) {
+		pr_err("unknown filter/not supported\n");
+		return;
+	}
+
+	if (enable)
+		val = (match << PROFTAG_MATCH_SHIFT) |
+		       FILTER_0_MASK << PROFTAG_MASK_SHIFT;
+
+	mask = PROFTAG_MATCH_MASK | PROFTAG_MASK_MASK;
+	llcc_bcast_modify(llcc_priv, FERC_PROF_FILTER_0_CFG0, val, mask);
+}
+
+/* FERC port: supports config, enable and PROFILING_TAG filtering. */
+static struct event_port_ops ferc_port_ops = {
+	.event_config	= ferc_event_config,
+	.event_enable	= ferc_event_enable,
+	.event_filter_config	= ferc_event_filter_config,
+};
+
+/* Configure (or clear, when !@enable) a FEWC profiling event on the
+ * given perfmon counter, applying FILTER_0 if the port is filtered.
+ */
+static void fewc_event_config(struct llcc_perfmon_private *llcc_priv,
+		unsigned int event_type, unsigned int event_counter_num,
+		bool enable)
+{
+	uint32_t val = 0, mask, counter_num = 0;
+
+	mask = EVENT_SEL_MASK;
+	if (llcc_priv->filtered_ports & (1 << EVENT_PORT_FEWC))
+		mask |= FILTER_SEL_MASK | FILTER_EN_MASK;
+
+	if (enable) {
+		val = (event_type << EVENT_SEL_SHIFT) & EVENT_SEL_MASK;
+		if (llcc_priv->filtered_ports & (1 << EVENT_PORT_FEWC))
+			val |= (FILTER_0 << FILTER_SEL_SHIFT) | FILTER_EN;
+
+		counter_num = event_counter_num;
+	}
+
+	llcc_bcast_modify(llcc_priv, FEWC_PROF_EVENT_n_CFG(event_counter_num),
+			val, mask);
+	perfmon_counter_config(llcc_priv, EVENT_PORT_FEWC, counter_num);
+}
+
+/* Program FEWC FILTER_0 profiling-tag match/mask; only PROFILING_TAG is
+ * supported on this port.
+ */
+static void fewc_event_filter_config(struct llcc_perfmon_private *llcc_priv,
+		enum filter_type filter, unsigned long match, bool enable)
+{
+	uint32_t val = 0, mask;
+
+	if (filter != PROFILING_TAG) {
+		pr_err("unknown filter/not supported\n");
+		return;
+	}
+
+	if (enable)
+		val = (match << PROFTAG_MATCH_SHIFT) |
+		       FILTER_0_MASK << PROFTAG_MASK_SHIFT;
+
+	mask = PROFTAG_MATCH_MASK | PROFTAG_MASK_MASK;
+	llcc_bcast_modify(llcc_priv, FEWC_PROF_FILTER_0_CFG0, val, mask);
+}
+
+/* FEWC port: no separate enable register, so no event_enable op. */
+static struct event_port_ops fewc_port_ops = {
+	.event_config	= fewc_event_config,
+	.event_filter_config	= fewc_event_filter_config,
+};
+
+/* Configure (or clear, when !@enable) a BEAC profiling event on the
+ * given perfmon counter, applying FILTER_0 if the port is filtered.
+ */
+static void beac_event_config(struct llcc_perfmon_private *llcc_priv,
+		unsigned int event_type, unsigned int event_counter_num,
+		bool enable)
+{
+	uint32_t val = 0, mask, counter_num = 0;
+
+	mask = EVENT_SEL_MASK;
+	if (llcc_priv->filtered_ports & (1 << EVENT_PORT_BEAC))
+		mask |= FILTER_SEL_MASK | FILTER_EN_MASK;
+
+	if (enable) {
+		val = (event_type << EVENT_SEL_SHIFT) & EVENT_SEL_MASK;
+		if (llcc_priv->filtered_ports & (1 << EVENT_PORT_BEAC))
+			val |= (FILTER_0 << FILTER_SEL_SHIFT) | FILTER_EN;
+
+		counter_num = event_counter_num;
+	}
+
+	llcc_bcast_modify(llcc_priv, BEAC_PROF_EVENT_n_CFG(event_counter_num),
+			val, mask);
+	perfmon_counter_config(llcc_priv, EVENT_PORT_BEAC, counter_num);
+}
+
+/* Enable/disable BEAC profiling with byte+beat scaling. */
+static void beac_event_enable(struct llcc_perfmon_private *llcc_priv,
+		bool enable)
+{
+	uint32_t val = 0, mask;
+
+	if (enable)
+		val = (BYTE_SCALING << BYTE_SCALING_SHIFT) |
+			(BEAT_SCALING << BEAT_SCALING_SHIFT) | PROF_EN;
+
+	mask = PROF_CFG_BEAT_SCALING_MASK | PROF_CFG_BYTE_SCALING_MASK
+		| PROF_CFG_EN_MASK;
+	llcc_bcast_modify(llcc_priv, BEAC_PROF_CFG, val, mask);
+}
+
+/* Program BEAC profiling-tag filtering; the tag is written both to the
+ * FILTER_0 match/mask registers and to the MC proftag field of the
+ * profile config register.  Only PROFILING_TAG is supported.
+ */
+static void beac_event_filter_config(struct llcc_perfmon_private *llcc_priv,
+		enum filter_type filter, unsigned long match, bool enable)
+{
+	uint32_t val = 0, mask;
+
+	if (filter != PROFILING_TAG) {
+		pr_err("unknown filter/not supported\n");
+		return;
+	}
+
+	if (enable)
+		val = (match << BEAC_PROFTAG_MATCH_SHIFT)
+			| FILTER_0_MASK << BEAC_PROFTAG_MASK_SHIFT;
+
+	mask = BEAC_PROFTAG_MASK_MASK | BEAC_PROFTAG_MATCH_MASK;
+	llcc_bcast_modify(llcc_priv, BEAC_PROF_FILTER_0_CFG5, val, mask);
+
+	/* val intentionally left 0 when disabling */
+	if (enable)
+		val = match << BEAC_MC_PROFTAG_SHIFT;
+
+	mask = BEAC_MC_PROFTAG_MASK;
+	llcc_bcast_modify(llcc_priv, BEAC_PROF_CFG, val, mask);
+}
+
+/* BEAC port: supports config, enable and PROFILING_TAG filtering. */
+static struct event_port_ops beac_port_ops = {
+	.event_config	= beac_event_config,
+	.event_enable	= beac_event_enable,
+	.event_filter_config	= beac_event_filter_config,
+};
+
+/* Configure (or clear, when !@enable) a BERC profiling event on the
+ * given perfmon counter, applying FILTER_0 if the port is filtered.
+ */
+static void berc_event_config(struct llcc_perfmon_private *llcc_priv,
+		unsigned int event_type, unsigned int event_counter_num,
+		bool enable)
+{
+	uint32_t val = 0, mask, counter_num = 0;
+
+	mask = EVENT_SEL_MASK;
+	if (llcc_priv->filtered_ports & (1 << EVENT_PORT_BERC))
+		mask |= FILTER_SEL_MASK | FILTER_EN_MASK;
+
+	if (enable) {
+		val = (event_type << EVENT_SEL_SHIFT) & EVENT_SEL_MASK;
+		if (llcc_priv->filtered_ports & (1 << EVENT_PORT_BERC))
+			val |= (FILTER_0 << FILTER_SEL_SHIFT) | FILTER_EN;
+
+		counter_num = event_counter_num;
+	}
+
+	llcc_bcast_modify(llcc_priv, BERC_PROF_EVENT_n_CFG(event_counter_num),
+			val, mask);
+	perfmon_counter_config(llcc_priv, EVENT_PORT_BERC, counter_num);
+}
+
+/* Enable/disable BERC profiling with byte+beat scaling. */
+static void berc_event_enable(struct llcc_perfmon_private *llcc_priv,
+		bool enable)
+{
+	uint32_t val = 0, mask;
+
+	if (enable)
+		val = (BYTE_SCALING << BYTE_SCALING_SHIFT) |
+			(BEAT_SCALING << BEAT_SCALING_SHIFT) | PROF_EN;
+
+	mask = PROF_CFG_BEAT_SCALING_MASK | PROF_CFG_BYTE_SCALING_MASK
+		| PROF_CFG_EN_MASK;
+	llcc_bcast_modify(llcc_priv, BERC_PROF_CFG, val, mask);
+}
+
+/* Program BERC FILTER_0 profiling-tag match/mask; only PROFILING_TAG is
+ * supported on this port.
+ */
+static void berc_event_filter_config(struct llcc_perfmon_private *llcc_priv,
+		enum filter_type filter, unsigned long match, bool enable)
+{
+	uint32_t val = 0, mask;
+
+	if (filter != PROFILING_TAG) {
+		pr_err("unknown filter/not supported\n");
+		return;
+	}
+
+	if (enable)
+		val = (match << PROFTAG_MATCH_SHIFT) |
+		       FILTER_0_MASK << PROFTAG_MASK_SHIFT;
+
+	mask = PROFTAG_MATCH_MASK | PROFTAG_MASK_MASK;
+	llcc_bcast_modify(llcc_priv, BERC_PROF_FILTER_0_CFG0, val, mask);
+}
+
+/* BERC port: supports config, enable and PROFILING_TAG filtering. */
+static struct event_port_ops berc_port_ops = {
+	.event_config	= berc_event_config,
+	.event_enable	= berc_event_enable,
+	.event_filter_config	= berc_event_filter_config,
+};
+
+/* Configure (or clear, when !@enable) a TRP profiling event on the
+ * given perfmon counter, applying FILTER_0 if the port is filtered.
+ */
+static void trp_event_config(struct llcc_perfmon_private *llcc_priv,
+		unsigned int event_type, unsigned int event_counter_num,
+		bool enable)
+{
+	uint32_t val = 0, mask, counter_num = 0;
+
+	mask = EVENT_SEL_MASK;
+	if (llcc_priv->filtered_ports & (1 << EVENT_PORT_TRP))
+		mask |= FILTER_SEL_MASK | FILTER_EN_MASK;
+
+	if (enable) {
+		val = (event_type << EVENT_SEL_SHIFT) & EVENT_SEL_MASK;
+		if (llcc_priv->filtered_ports & (1 << EVENT_PORT_TRP))
+			val |= (FILTER_0 << FILTER_SEL_SHIFT) | FILTER_EN;
+
+		counter_num = event_counter_num;
+	}
+
+	llcc_bcast_modify(llcc_priv, TRP_PROF_EVENT_n_CFG(event_counter_num),
+			val, mask);
+	perfmon_counter_config(llcc_priv, EVENT_PORT_TRP, counter_num);
+}
+
+/* Program TRP FILTER_0 match/mask; SCID, WAY_ID and PROFILING_TAG are
+ * supported, all through the same CFG1 register.
+ */
+static void trp_event_filter_config(struct llcc_perfmon_private *llcc_priv,
+		enum filter_type filter, unsigned long match, bool enable)
+{
+	uint32_t val = 0, mask;
+
+	if (filter == SCID) {
+		if (enable)
+			val = (match << TRP_SCID_MATCH_SHIFT)
+				| TRP_SCID_MASK_MASK;
+
+		mask = TRP_SCID_MATCH_MASK | TRP_SCID_MASK_MASK;
+	} else if (filter == WAY_ID) {
+		if (enable)
+			val = (match << TRP_WAY_ID_MATCH_SHIFT)
+				| TRP_WAY_ID_MASK_MASK;
+
+		mask = TRP_WAY_ID_MATCH_MASK |
+			TRP_WAY_ID_MASK_MASK;
+	} else if (filter == PROFILING_TAG) {
+		if (enable)
+			val = (match << TRP_PROFTAG_MATCH_SHIFT)
+				| FILTER_0_MASK << TRP_PROFTAG_MASK_SHIFT;
+
+		mask = TRP_PROFTAG_MATCH_MASK | TRP_PROFTAG_MASK_MASK;
+	} else {
+		pr_err("unknown filter/not supported\n");
+		return;
+	}
+
+	llcc_bcast_modify(llcc_priv, TRP_PROF_FILTER_0_CFG1, val, mask);
+}
+
+/* TRP port: no separate enable register, so no event_enable op. */
+static struct event_port_ops  trp_port_ops = {
+	.event_config	= trp_event_config,
+	.event_filter_config	= trp_event_filter_config,
+};
+
+/* Configure (or clear, when !@enable) a DRP profiling event on the
+ * given perfmon counter, applying FILTER_0 if the port is filtered.
+ */
+static void drp_event_config(struct llcc_perfmon_private *llcc_priv,
+		unsigned int event_type, unsigned int event_counter_num,
+		bool enable)
+{
+	uint32_t val = 0, mask, counter_num = 0;
+
+	mask = EVENT_SEL_MASK;
+	if (llcc_priv->filtered_ports & (1 << EVENT_PORT_DRP))
+		mask |= FILTER_SEL_MASK | FILTER_EN_MASK;
+
+	if (enable) {
+		val = (event_type << EVENT_SEL_SHIFT) & EVENT_SEL_MASK;
+		if (llcc_priv->filtered_ports & (1 << EVENT_PORT_DRP))
+			val |= (FILTER_0 << FILTER_SEL_SHIFT) | FILTER_EN;
+
+		counter_num = event_counter_num;
+	}
+
+	llcc_bcast_modify(llcc_priv, DRP_PROF_EVENT_n_CFG(event_counter_num),
+			val, mask);
+	perfmon_counter_config(llcc_priv, EVENT_PORT_DRP, counter_num);
+}
+
+/* Enable/disable DRP profiling; beat scaling only (no byte scaling on
+ * this port).
+ */
+static void drp_event_enable(struct llcc_perfmon_private *llcc_priv,
+		bool enable)
+{
+	uint32_t val = 0, mask;
+
+	if (enable)
+		val = (BEAT_SCALING << BEAT_SCALING_SHIFT) | PROF_EN;
+
+	mask = PROF_CFG_BEAT_SCALING_MASK | PROF_CFG_EN_MASK;
+	llcc_bcast_modify(llcc_priv, DRP_PROF_CFG, val, mask);
+}
+
+/* DRP port: supports config and enable; no filter support. */
+static struct event_port_ops drp_port_ops = {
+	.event_config	= drp_event_config,
+	.event_enable	= drp_event_enable,
+};
+
+/* Configure (or clear, when !@enable) a PMGR profiling event on the
+ * given perfmon counter, applying FILTER_0 if the port is filtered.
+ */
+static void pmgr_event_config(struct llcc_perfmon_private *llcc_priv,
+		unsigned int event_type, unsigned int event_counter_num,
+		bool enable)
+{
+	uint32_t val = 0, mask, counter_num = 0;
+
+	mask = EVENT_SEL_MASK;
+	if (llcc_priv->filtered_ports & (1 << EVENT_PORT_PMGR))
+		mask |= FILTER_SEL_MASK | FILTER_EN_MASK;
+
+	if (enable) {
+		val = (event_type << EVENT_SEL_SHIFT) & EVENT_SEL_MASK;
+		if (llcc_priv->filtered_ports & (1 << EVENT_PORT_PMGR))
+			val |= (FILTER_0 << FILTER_SEL_SHIFT) | FILTER_EN;
+
+		counter_num = event_counter_num;
+	}
+
+	llcc_bcast_modify(llcc_priv, PMGR_PROF_EVENT_n_CFG(event_counter_num),
+			val, mask);
+	perfmon_counter_config(llcc_priv, EVENT_PORT_PMGR, counter_num);
+}
+
+/* PMGR port: config only; no enable or filter support. */
+static struct event_port_ops pmgr_port_ops = {
+	.event_config	= pmgr_event_config,
+};
+
+/* Register @ops for @event_port_num; bounds-checks both the number of
+ * registered ports and the index used into port_ops[].
+ */
+static void llcc_register_event_port(struct llcc_perfmon_private *llcc_priv,
+		struct event_port_ops *ops, unsigned int event_port_num)
+{
+	/* the index itself must also be validated before writing
+	 * port_ops[event_port_num], not only the registration count
+	 */
+	if (llcc_priv->port_configd >= MAX_NUMBER_OF_PORTS ||
+			event_port_num >= MAX_NUMBER_OF_PORTS) {
+		pr_err("Register port Failure!\n");
+		return;
+	}
+
+	llcc_priv->port_configd = llcc_priv->port_configd + 1;
+	llcc_priv->port_ops[event_port_num] = ops;
+}
+
+/* Periodic-dump timer callback: accumulate the counters and re-arm the
+ * timer for the next period.
+ */
+static enum hrtimer_restart llcc_perfmon_timer_handler(struct hrtimer *hrtimer)
+{
+	struct llcc_perfmon_private *llcc_priv = container_of(hrtimer,
+			struct llcc_perfmon_private, hrtimer);
+
+	perfmon_counter_dump(llcc_priv);
+	hrtimer_forward_now(&llcc_priv->hrtimer, llcc_priv->expires);
+	return HRTIMER_RESTART;
+}
+
+/* Probe: map the LLCC regmap via the parent syscon, read the broadcast
+ * and bank offsets from DT, register the event ports and finally expose
+ * the perfmon sysfs interface.
+ */
+static int llcc_perfmon_probe(struct platform_device *pdev)
+{
+	int result = 0;
+	struct llcc_perfmon_private *llcc_priv;
+	struct device *dev = &pdev->dev;
+	uint32_t val;
+
+	llcc_priv = devm_kzalloc(&pdev->dev, sizeof(*llcc_priv), GFP_KERNEL);
+	if (llcc_priv == NULL)
+		return -ENOMEM;
+
+	llcc_priv->llcc_map = syscon_node_to_regmap(dev->parent->of_node);
+	if (IS_ERR(llcc_priv->llcc_map))
+		return PTR_ERR(llcc_priv->llcc_map);
+
+	result = of_property_read_u32(pdev->dev.parent->of_node,
+			"qcom,llcc-broadcast-off", &llcc_priv->broadcast_off);
+	if (result) {
+		/* name the DT property actually looked up */
+		pr_err("Invalid qcom,llcc-broadcast-off entry\n");
+		return result;
+	}
+
+	llcc_bcast_read(llcc_priv, LLCC_COMMON_STATUS0, &val);
+
+	llcc_priv->num_banks = (val & LB_CNT_MASK) >> LB_CNT_SHIFT;
+	/* bank_off[] holds at most NUM_CHANNELS entries; never let the
+	 * hardware-reported bank count overrun it
+	 */
+	if (llcc_priv->num_banks > NUM_CHANNELS) {
+		pr_err("Unsupported number of banks %u\n",
+				llcc_priv->num_banks);
+		return -EINVAL;
+	}
+
+	result = of_property_read_variable_u32_array(pdev->dev.parent->of_node,
+			"qcom,llcc-banks-off", (u32 *)&llcc_priv->bank_off,
+			1, llcc_priv->num_banks);
+	if (result < 0) {
+		pr_err("Invalid qcom,llcc-banks-off entry\n");
+		return result;
+	}
+
+	/* Finish all driver state (lock, ports, timer, drvdata) before
+	 * the sysfs attributes go live; their handlers dereference the
+	 * drvdata and take the mutex as soon as userspace can reach them.
+	 */
+	mutex_init(&llcc_priv->mutex);
+	platform_set_drvdata(pdev, llcc_priv);
+	llcc_register_event_port(llcc_priv, &feac_port_ops, EVENT_PORT_FEAC);
+	llcc_register_event_port(llcc_priv, &ferc_port_ops, EVENT_PORT_FERC);
+	llcc_register_event_port(llcc_priv, &fewc_port_ops, EVENT_PORT_FEWC);
+	llcc_register_event_port(llcc_priv, &beac_port_ops, EVENT_PORT_BEAC);
+	llcc_register_event_port(llcc_priv, &berc_port_ops, EVENT_PORT_BERC);
+	llcc_register_event_port(llcc_priv, &trp_port_ops, EVENT_PORT_TRP);
+	llcc_register_event_port(llcc_priv, &drp_port_ops, EVENT_PORT_DRP);
+	llcc_register_event_port(llcc_priv, &pmgr_port_ops, EVENT_PORT_PMGR);
+	hrtimer_init(&llcc_priv->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	llcc_priv->hrtimer.function = llcc_perfmon_timer_handler;
+	llcc_priv->expires.tv64 = 0;
+
+	result = sysfs_create_group(&pdev->dev.kobj, &llcc_perfmon_group);
+	if (result) {
+		pr_err("Unable to create sysfs version group\n");
+		mutex_destroy(&llcc_priv->mutex);
+		platform_set_drvdata(pdev, NULL);
+		return result;
+	}
+
+	return 0;
+}
+
+/* Remove: make sure the periodic timer is fully quiesced, then tear
+ * down the mutex and sysfs interface.
+ */
+static int llcc_perfmon_remove(struct platform_device *pdev)
+{
+	struct llcc_perfmon_private *llcc_priv = platform_get_drvdata(pdev);
+
+	/* loop until cancel observes the timer inactive (the callback may
+	 * be re-arming itself concurrently)
+	 */
+	while (hrtimer_active(&llcc_priv->hrtimer))
+		hrtimer_cancel(&llcc_priv->hrtimer);
+
+	mutex_destroy(&llcc_priv->mutex);
+	sysfs_remove_group(&pdev->dev.kobj, &llcc_perfmon_group);
+	platform_set_drvdata(pdev, NULL);
+	return 0;
+}
+
+/* Platform driver boilerplate: match on the "qcom,llcc-perfmon" DT
+ * compatible declared under the parent LLCC node.
+ */
+static const struct of_device_id of_match_llcc[] = {
+	{
+		.compatible = "qcom,llcc-perfmon",
+	},
+	{},
+};
+MODULE_DEVICE_TABLE(of, of_match_llcc);
+
+static struct platform_driver llcc_perfmon_driver = {
+	.probe = llcc_perfmon_probe,
+	.remove	= llcc_perfmon_remove,
+	.driver	= {
+		.name = LLCC_PERFMON_NAME,
+		.of_match_table = of_match_llcc,
+	}
+};
+module_platform_driver(llcc_perfmon_driver);
+
+MODULE_DESCRIPTION("QCOM LLCC PMU MONITOR");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/llcc_perfmon.h b/drivers/soc/qcom/llcc_perfmon.h
new file mode 100644
index 0000000..11dd2f8
--- /dev/null
+++ b/drivers/soc/qcom/llcc_perfmon.h
@@ -0,0 +1,197 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SOC_QCOM_LLCC_PERFMON_H_
+#define _SOC_QCOM_LLCC_PERFMON_H_
+
+#define LLCC_COMMON_STATUS0		(0x3000C)
+/* FEAC */
+#define FEAC_PROF_FILTER_0_CFG5		(0x037014)
+#define FEAC_PROF_FILTER_0_CFG6		(0x037018)
+#define FEAC_PROF_EVENT_n_CFG(n)	(0x037060 + 4 * n)
+#define FEAC_PROF_CFG			(0x0370A0)
+
+/* FERC */
+#define FERC_PROF_FILTER_0_CFG0		(0x03B000)
+#define FERC_PROF_EVENT_n_CFG(n)	(0x03B020 + 4 * n)
+#define FERC_PROF_CFG			(0x03B060)
+
+/* FEWC */
+#define FEWC_PROF_FILTER_0_CFG0		(0x033000)
+#define FEWC_PROF_EVENT_n_CFG(n)	(0x033020 + 4 * n)
+
+/* BEAC */
+#define BEAC_PROF_FILTER_0_CFG5		(0x049014)
+#define BEAC_PROF_EVENT_n_CFG(n)	(0x049040 + 4 * n)
+#define BEAC_PROF_CFG			(0x049080)
+
+/* BERC */
+#define BERC_PROF_FILTER_0_CFG0		(0x039000)
+#define BERC_PROF_EVENT_n_CFG(n)	(0x039020 + 4 * n)
+#define BERC_PROF_CFG			(0x039060)
+
+/* TRP */
+#define TRP_PROF_FILTER_0_CFG1		(0x024004)
+#define TRP_PROF_EVENT_n_CFG(n)		(0x024020 + 4 * n)
+#define TRP_SCID_n_STATUS(n)		(0x000004 + 0x1000 * n)
+
+/* DRP */
+#define DRP_PROF_EVENT_n_CFG(n)		(0x044010 + 4 * n)
+#define DRP_PROF_CFG			(0x044050)
+
+/* PMGR */
+#define PMGR_PROF_EVENT_n_CFG(n)	(0x03F000 + 4 * n)
+
+#define PERFMON_COUNTER_n_CONFIG(n)	(0x031020 + 4 * n)
+#define PERFMON_MODE			(0x03100C)
+#define PERFMON_DUMP			(0x031010)
+#define BROADCAST_COUNTER_n_VALUE(n)	(0x031060 + 4 * n)
+
+#define LLCC_COUNTER_n_VALUE(n)		(0x031060 + 4 * n)
+
+#define EVENT_NUM_MAX			(64)
+#define SCID_MAX			(32)
+
+/* Perfmon */
+#define CLEAR_ON_ENABLE			BIT(31)
+#define CLEAR_ON_DUMP			BIT(30)
+#define FREEZE_ON_SATURATE		BIT(29)
+#define CHAINING_EN			BIT(28)
+#define COUNT_CLOCK_EVENT		BIT(24)
+
+#define EVENT_SELECT_SHIFT		(16)
+#define PERFMON_EVENT_SELECT_MASK	GENMASK(EVENT_SELECT_SHIFT + 4,\
+						EVENT_SELECT_SHIFT)
+#define PORT_SELECT_SHIFT		(0)
+#define PERFMON_PORT_SELECT_MASK	GENMASK(PORT_SELECT_SHIFT + 3,\
+						PORT_SELECT_SHIFT)
+
+#define MANUAL_MODE			(0)
+#define TIMED_MODE			(1)
+#define TRIGGER_MODE			(2)
+#define MONITOR_EN_SHIFT		(15)
+#define MONITOR_EN			BIT(MONITOR_EN_SHIFT)
+#define PERFMON_MODE_MONITOR_EN_MASK	GENMASK(MONITOR_EN_SHIFT + 0,\
+						MONITOR_EN_SHIFT)
+#define MONITOR_MODE_SHIFT		(0)
+#define PERFMON_MODE_MONITOR_MODE_MASK	GENMASK(MONITOR_MODE_SHIFT + 0,\
+						MONITOR_MODE_SHIFT)
+
+#define MONITOR_DUMP			BIT(0)
+
+/* COMMON */
+#define BYTE_SCALING			(1024)
+#define BEAT_SCALING			(32)
+#define LB_CNT_SHIFT			(28)
+#define LB_CNT_MASK			GENMASK(LB_CNT_SHIFT + 3, \
+						LB_CNT_SHIFT)
+
+#define BYTE_SCALING_SHIFT		(16)
+#define PROF_CFG_BYTE_SCALING_MASK	GENMASK(BYTE_SCALING_SHIFT + 11,\
+						BYTE_SCALING_SHIFT)
+#define BEAT_SCALING_SHIFT		(8)
+#define PROF_CFG_BEAT_SCALING_MASK	GENMASK(BEAT_SCALING_SHIFT + 7,\
+						BEAT_SCALING_SHIFT)
+#define PROF_EN_SHIFT			(0)
+#define PROF_EN				BIT(PROF_EN_SHIFT)
+#define PROF_CFG_EN_MASK		GENMASK(PROF_EN_SHIFT + 0,\
+						PROF_EN_SHIFT)
+
+#define FILTER_EN_SHIFT			(31)
+#define FILTER_EN			BIT(FILTER_EN_SHIFT)
+#define FILTER_EN_MASK			GENMASK(FILTER_EN_SHIFT + 0,\
+						FILTER_EN_SHIFT)
+#define FILTER_0			(0)
+#define FILTER_0_MASK			GENMASK(FILTER_0 + 0, \
+						FILTER_0)
+#define FILTER_1			(1)
+#define FILTER_1_MASK			GENMASK(FILTER_1 + 0, \
+						FILTER_1)
+
+#define FILTER_SEL_SHIFT		(16)
+#define FILTER_SEL_MASK			GENMASK(FILTER_SEL_SHIFT + 0,\
+						FILTER_SEL_SHIFT)
+#define EVENT_SEL_SHIFT			(0)
+#define EVENT_SEL_MASK			GENMASK(EVENT_SEL_SHIFT + 5,\
+						EVENT_SEL_SHIFT)
+
+#define MID_MASK_SHIFT			(16)
+#define MID_MASK_MASK			GENMASK(MID_MASK_SHIFT + 15, \
+						MID_MASK_SHIFT)
+#define MID_MATCH_SHIFT			(0)
+#define MID_MATCH_MASK			GENMASK(MID_MATCH_SHIFT + 15, \
+						MID_MATCH_SHIFT)
+#define SCID_MASK_SHIFT			(16)
+#define SCID_MASK_MASK			GENMASK(SCID_MASK_SHIFT + 15, \
+						SCID_MASK_SHIFT)
+#define SCID_MATCH_SHIFT		(0)
+#define SCID_MATCH_MASK			GENMASK(SCID_MATCH_SHIFT + 15, \
+						SCID_MATCH_SHIFT)
+#define PROFTAG_MASK_SHIFT		(2)
+#define PROFTAG_MASK_MASK		GENMASK(PROFTAG_MASK_SHIFT + 1,\
+						PROFTAG_MASK_SHIFT)
+#define PROFTAG_MATCH_SHIFT		(0)
+#define PROFTAG_MATCH_MASK		GENMASK(PROFTAG_MATCH_SHIFT + 1,\
+						PROFTAG_MATCH_SHIFT)
+/* FEAC */
+#define FEAC_SCALING_FILTER_SEL_SHIFT	(2)
+#define FEAC_SCALING_FILTER_SEL_MASK	GENMASK(FEAC_SCALING_FILTER_SEL_SHIFT \
+					+ 0, \
+					FEAC_SCALING_FILTER_SEL_SHIFT)
+#define FEAC_SCALING_FILTER_EN_SHIFT	(1)
+#define FEAC_SCALING_FILTER_EN		BIT(FEAC_SCALING_FILTER_EN_SHIFT)
+#define FEAC_SCALING_FILTER_EN_MASK	GENMASK(FEAC_SCALING_FILTER_EN_SHIFT \
+					+ 0, \
+					FEAC_SCALING_FILTER_EN_SHIFT)
+/* BEAC */
+#define BEAC_PROFTAG_MASK_SHIFT		(14)
+#define BEAC_PROFTAG_MASK_MASK		GENMASK(BEAC_PROFTAG_MASK_SHIFT + 1,\
+						BEAC_PROFTAG_MASK_SHIFT)
+#define BEAC_PROFTAG_MATCH_SHIFT	(12)
+#define BEAC_PROFTAG_MATCH_MASK		GENMASK(BEAC_PROFTAG_MATCH_SHIFT + 1,\
+						BEAC_PROFTAG_MATCH_SHIFT)
+#define BEAC_MC_PROFTAG_SHIFT		(1)
+#define BEAC_MC_PROFTAG_MASK		GENMASK(BEAC_MC_PROFTAG_SHIFT + 1,\
+					BEAC_MC_PROFTAG_SHIFT)
+/* TRP */
+#define TRP_SCID_MATCH_SHIFT		(0)
+#define TRP_SCID_MATCH_MASK		GENMASK(TRP_SCID_MATCH_SHIFT + 4,\
+						TRP_SCID_MATCH_SHIFT)
+#define TRP_SCID_MASK_SHIFT		(8)
+#define TRP_SCID_MASK_MASK		GENMASK(TRP_SCID_MASK_SHIFT + 4,\
+						TRP_SCID_MASK_SHIFT)
+#define TRP_WAY_ID_MATCH_SHIFT		(16)
+#define TRP_WAY_ID_MATCH_MASK		GENMASK(TRP_WAY_ID_MATCH_SHIFT + 3,\
+						TRP_WAY_ID_MATCH_SHIFT)
+#define TRP_WAY_ID_MASK_SHIFT		(20)
+#define TRP_WAY_ID_MASK_MASK		GENMASK(TRP_WAY_ID_MASK_SHIFT + 3,\
+						TRP_WAY_ID_MASK_SHIFT)
+#define TRP_PROFTAG_MATCH_SHIFT		(24)
+#define TRP_PROFTAG_MATCH_MASK		GENMASK(TRP_PROFTAG_MATCH_SHIFT + 1,\
+						TRP_PROFTAG_MATCH_SHIFT)
+#define TRP_PROFTAG_MASK_SHIFT		(28)
+#define TRP_PROFTAG_MASK_MASK		GENMASK(TRP_PROFTAG_MASK_SHIFT + 1,\
+						TRP_PROFTAG_MASK_SHIFT)
+
+#define TRP_SCID_STATUS_ACTIVE_SHIFT		(0)
+#define TRP_SCID_STATUS_ACTIVE_MASK		GENMASK( \
+						TRP_SCID_STATUS_ACTIVE_SHIFT \
+						+ 0, \
+						TRP_SCID_STATUS_ACTIVE_SHIFT)
+#define TRP_SCID_STATUS_DEACTIVE_SHIFT		(1)
+#define TRP_SCID_STATUS_CURRENT_CAP_SHIFT	(16)
+#define TRP_SCID_STATUS_CURRENT_CAP_MASK	GENMASK( \
+					TRP_SCID_STATUS_CURRENT_CAP_SHIFT \
+					+ 13, \
+					TRP_SCID_STATUS_CURRENT_CAP_SHIFT)
+
+#endif /* _SOC_QCOM_LLCC_PERFMON_H_ */
diff --git a/drivers/soc/qcom/msm_performance.c b/drivers/soc/qcom/msm_performance.c
index 979c628..b5ce753 100644
--- a/drivers/soc/qcom/msm_performance.c
+++ b/drivers/soc/qcom/msm_performance.c
@@ -25,7 +25,6 @@
 #include <linux/module.h>
 #include <linux/input.h>
 #include <linux/kthread.h>
-#include <soc/qcom/msm-core.h>
 
 static struct mutex managed_cpus_lock;
 
@@ -33,10 +32,6 @@ static struct mutex managed_cpus_lock;
 static unsigned int num_clusters;
 struct cluster {
 	cpumask_var_t cpus;
-	/* Number of CPUs to maintain online */
-	int max_cpu_request;
-	/* To track CPUs that the module decides to offline */
-	cpumask_var_t offlined_cpus;
 	/* stats for load detection */
 	/* IO */
 	u64 last_io_check_ts;
@@ -84,8 +79,6 @@ struct cluster {
 static struct cluster **managed_clusters;
 static bool clusters_inited;
 
-/* Work to evaluate the onlining/offlining CPUs */
-static struct delayed_work evaluate_hotplug_work;
 
 /* To handle cpufreq min/max request */
 struct cpu_status {
@@ -94,11 +87,8 @@ struct cpu_status {
 };
 static DEFINE_PER_CPU(struct cpu_status, cpu_stats);
 
-static unsigned int num_online_managed(struct cpumask *mask);
 static int init_cluster_control(void);
-static int rm_high_pwr_cost_cpus(struct cluster *cl);
 static int init_events_group(void);
-static DEFINE_PER_CPU(unsigned int, cpu_power_cost);
 struct events {
 	spinlock_t cpu_hotplug_lock;
 	bool cpu_hotplug;
@@ -214,65 +204,6 @@ static const struct kernel_param_ops param_ops_num_clusters = {
 };
 device_param_cb(num_clusters, &param_ops_num_clusters, NULL, 0644);
 
-static int set_max_cpus(const char *buf, const struct kernel_param *kp)
-{
-	unsigned int i, ntokens = 0;
-	const char *cp = buf;
-	int val;
-
-	if (!clusters_inited)
-		return -EINVAL;
-
-	while ((cp = strpbrk(cp + 1, ":")))
-		ntokens++;
-
-	if (ntokens != (num_clusters - 1))
-		return -EINVAL;
-
-	cp = buf;
-	for (i = 0; i < num_clusters; i++) {
-
-		if (sscanf(cp, "%d\n", &val) != 1)
-			return -EINVAL;
-		if (val > (int)cpumask_weight(managed_clusters[i]->cpus))
-			return -EINVAL;
-
-		managed_clusters[i]->max_cpu_request = val;
-
-		cp = strnchr(cp, strlen(cp), ':');
-		cp++;
-		trace_set_max_cpus(cpumask_bits(managed_clusters[i]->cpus)[0],
-								val);
-	}
-
-	schedule_delayed_work(&evaluate_hotplug_work, 0);
-
-	return 0;
-}
-
-static int get_max_cpus(char *buf, const struct kernel_param *kp)
-{
-	int i, cnt = 0;
-
-	if (!clusters_inited)
-		return cnt;
-
-	for (i = 0; i < num_clusters; i++)
-		cnt += snprintf(buf + cnt, PAGE_SIZE - cnt,
-				"%d:", managed_clusters[i]->max_cpu_request);
-	cnt--;
-	cnt += snprintf(buf + cnt, PAGE_SIZE - cnt, " ");
-	return cnt;
-}
-
-static const struct kernel_param_ops param_ops_max_cpus = {
-	.set = set_max_cpus,
-	.get = get_max_cpus,
-};
-
-#ifdef CONFIG_MSM_PERFORMANCE_HOTPLUG_ON
-device_param_cb(max_cpus, &param_ops_max_cpus, NULL, 0644);
-#endif
 
 static int set_managed_cpus(const char *buf, const struct kernel_param *kp)
 {
@@ -291,7 +222,6 @@ static int set_managed_cpus(const char *buf, const struct kernel_param *kp)
 		if (cpumask_empty(managed_clusters[i]->cpus)) {
 			mutex_lock(&managed_cpus_lock);
 			cpumask_copy(managed_clusters[i]->cpus, &tmp_mask);
-			cpumask_clear(managed_clusters[i]->offlined_cpus);
 			mutex_unlock(&managed_cpus_lock);
 			break;
 		}
@@ -337,53 +267,6 @@ static const struct kernel_param_ops param_ops_managed_cpus = {
 };
 device_param_cb(managed_cpus, &param_ops_managed_cpus, NULL, 0644);
 
-/* Read-only node: To display all the online managed CPUs */
-static int get_managed_online_cpus(char *buf, const struct kernel_param *kp)
-{
-	int i, cnt = 0, total_cnt = 0;
-	char tmp[MAX_LENGTH_CPU_STRING] = "";
-	struct cpumask tmp_mask;
-	struct cluster *i_cl;
-
-	if (!clusters_inited)
-		return cnt;
-
-	for (i = 0; i < num_clusters; i++) {
-		i_cl = managed_clusters[i];
-
-		cpumask_clear(&tmp_mask);
-		cpumask_complement(&tmp_mask, i_cl->offlined_cpus);
-		cpumask_and(&tmp_mask, i_cl->cpus, &tmp_mask);
-
-		cnt = cpumap_print_to_pagebuf(true, buf, &tmp_mask);
-		if ((i + 1) < num_clusters &&
-		    (total_cnt + cnt + 1) <= MAX_LENGTH_CPU_STRING) {
-			snprintf(tmp + total_cnt, cnt, "%s", buf);
-			tmp[cnt-1] = ':';
-			tmp[cnt] = '\0';
-			total_cnt += cnt;
-		} else if ((i + 1) == num_clusters &&
-			   (total_cnt + cnt) <= MAX_LENGTH_CPU_STRING) {
-			snprintf(tmp + total_cnt, cnt, "%s", buf);
-			total_cnt += cnt;
-		} else {
-			pr_err("invalid string for managed_cpu:%s%s\n", tmp,
-				buf);
-			break;
-		}
-	}
-	snprintf(buf, PAGE_SIZE, "%s", tmp);
-	return total_cnt;
-}
-
-static const struct kernel_param_ops param_ops_managed_online_cpus = {
-	.get = get_managed_online_cpus,
-};
-
-#ifdef CONFIG_MSM_PERFORMANCE_HOTPLUG_ON
-device_param_cb(managed_online_cpus, &param_ops_managed_online_cpus,
-							NULL, 0444);
-#endif
 /*
  * Userspace sends cpu#:min_freq_value to vote for min_freq_value as the new
  * scaling_min. To withdraw its vote it needs to enter cpu#:0
@@ -2274,15 +2157,6 @@ static struct attribute_group events_attr_group = {
 };
 /*******************************sysfs ends************************************/
 
-static unsigned int num_online_managed(struct cpumask *mask)
-{
-	struct cpumask tmp_mask;
-
-	cpumask_clear(&tmp_mask);
-	cpumask_and(&tmp_mask, mask, cpu_online_mask);
-
-	return cpumask_weight(&tmp_mask);
-}
 
 static int perf_adjust_notify(struct notifier_block *nb, unsigned long val,
 							void *data)
@@ -2359,185 +2233,6 @@ static int events_notify_userspace(void *data)
 
 	return 0;
 }
-
-/*
- * Attempt to offline CPUs based on their power cost.
- * CPUs with higher power costs are offlined first.
- */
-static int __ref rm_high_pwr_cost_cpus(struct cluster *cl)
-{
-	unsigned int cpu, i;
-	struct cpu_pwr_stats *per_cpu_info = get_cpu_pwr_stats();
-	struct cpu_pstate_pwr *costs;
-	unsigned int *pcpu_pwr;
-	unsigned int max_cost_cpu, max_cost;
-	int any_cpu = -1;
-
-	if (!per_cpu_info)
-		return -EAGAIN;
-
-	for_each_cpu(cpu, cl->cpus) {
-		costs = per_cpu_info[cpu].ptable;
-		if (!costs || !costs[0].freq)
-			continue;
-
-		i = 1;
-		while (costs[i].freq)
-			i++;
-
-		pcpu_pwr = &per_cpu(cpu_power_cost, cpu);
-		*pcpu_pwr = costs[i - 1].power;
-		any_cpu = (int)cpu;
-		pr_debug("msm_perf: CPU:%d Power:%u\n", cpu, *pcpu_pwr);
-	}
-
-	if (any_cpu < 0)
-		return -EAGAIN;
-
-	for (i = 0; i < cpumask_weight(cl->cpus); i++) {
-		max_cost = 0;
-		max_cost_cpu = cpumask_first(cl->cpus);
-
-		for_each_cpu(cpu, cl->cpus) {
-			pcpu_pwr = &per_cpu(cpu_power_cost, cpu);
-			if (max_cost < *pcpu_pwr) {
-				max_cost = *pcpu_pwr;
-				max_cost_cpu = cpu;
-			}
-		}
-
-		if (!cpu_online(max_cost_cpu))
-			goto end;
-
-		pr_debug("msm_perf: Offlining CPU%d Power:%d\n", max_cost_cpu,
-								max_cost);
-		cpumask_set_cpu(max_cost_cpu, cl->offlined_cpus);
-		lock_device_hotplug();
-		if (device_offline(get_cpu_device(max_cost_cpu))) {
-			cpumask_clear_cpu(max_cost_cpu, cl->offlined_cpus);
-			pr_debug("msm_perf: Offlining CPU%d failed\n",
-								max_cost_cpu);
-		}
-		unlock_device_hotplug();
-
-end:
-		pcpu_pwr = &per_cpu(cpu_power_cost, max_cost_cpu);
-		*pcpu_pwr = 0;
-		if (num_online_managed(cl->cpus) <= cl->max_cpu_request)
-			break;
-	}
-
-	if (num_online_managed(cl->cpus) > cl->max_cpu_request)
-		return -EAGAIN;
-	else
-		return 0;
-}
-
-/*
- * try_hotplug tries to online/offline cores based on the current requirement.
- * It loops through the currently managed CPUs and tries to online/offline
- * them until the max_cpu_request criteria is met.
- */
-static void __ref try_hotplug(struct cluster *data)
-{
-	unsigned int i;
-
-	if (!clusters_inited)
-		return;
-
-	pr_debug("msm_perf: Trying hotplug...%d:%d\n",
-			num_online_managed(data->cpus),	num_online_cpus());
-
-	mutex_lock(&managed_cpus_lock);
-	if (num_online_managed(data->cpus) > data->max_cpu_request) {
-		if (!rm_high_pwr_cost_cpus(data)) {
-			mutex_unlock(&managed_cpus_lock);
-			return;
-		}
-
-		/*
-		 * If power aware offlining fails due to power cost info
-		 * being unavaiable fall back to original implementation
-		 */
-		for (i = num_present_cpus() - 1; i >= 0 &&
-						i < num_present_cpus(); i--) {
-			if (!cpumask_test_cpu(i, data->cpus) ||	!cpu_online(i))
-				continue;
-
-			pr_debug("msm_perf: Offlining CPU%d\n", i);
-			cpumask_set_cpu(i, data->offlined_cpus);
-			lock_device_hotplug();
-			if (device_offline(get_cpu_device(i))) {
-				cpumask_clear_cpu(i, data->offlined_cpus);
-				pr_debug("msm_perf: Offlining CPU%d failed\n",
-									i);
-				unlock_device_hotplug();
-				continue;
-			}
-			unlock_device_hotplug();
-			if (num_online_managed(data->cpus) <=
-							data->max_cpu_request)
-				break;
-		}
-	} else {
-		for_each_cpu(i, data->cpus) {
-			if (cpu_online(i))
-				continue;
-			pr_debug("msm_perf: Onlining CPU%d\n", i);
-			lock_device_hotplug();
-			if (device_online(get_cpu_device(i))) {
-				pr_debug("msm_perf: Onlining CPU%d failed\n",
-									i);
-				unlock_device_hotplug();
-				continue;
-			}
-			unlock_device_hotplug();
-			cpumask_clear_cpu(i, data->offlined_cpus);
-			if (num_online_managed(data->cpus) >=
-							data->max_cpu_request)
-				break;
-		}
-	}
-	mutex_unlock(&managed_cpus_lock);
-}
-
-static void __ref release_cluster_control(struct cpumask *off_cpus)
-{
-	int cpu;
-
-	for_each_cpu(cpu, off_cpus) {
-		pr_debug("msm_perf: Release CPU %d\n", cpu);
-		lock_device_hotplug();
-		if (!device_online(get_cpu_device(cpu)))
-			cpumask_clear_cpu(cpu, off_cpus);
-		unlock_device_hotplug();
-	}
-}
-
-/* Work to evaluate current online CPU status and hotplug CPUs as per need */
-static void check_cluster_status(struct work_struct *work)
-{
-	int i;
-	struct cluster *i_cl;
-
-	for (i = 0; i < num_clusters; i++) {
-		i_cl = managed_clusters[i];
-
-		if (cpumask_empty(i_cl->cpus))
-			continue;
-
-		if (i_cl->max_cpu_request < 0) {
-			if (!cpumask_empty(i_cl->offlined_cpus))
-				release_cluster_control(i_cl->offlined_cpus);
-			continue;
-		}
-
-		if (num_online_managed(i_cl->cpus) !=
-					i_cl->max_cpu_request)
-			try_hotplug(i_cl);
-	}
-}
-
 static int __ref msm_performance_cpu_callback(struct notifier_block *nfb,
 		unsigned long action, void *hcpu)
 {
@@ -2559,43 +2254,6 @@ static int __ref msm_performance_cpu_callback(struct notifier_block *nfb,
 		}
 	}
 
-	if (i_cl == NULL)
-		return NOTIFY_OK;
-
-	if (action == CPU_UP_PREPARE || action == CPU_UP_PREPARE_FROZEN) {
-		/*
-		 * Prevent onlining of a managed CPU if max_cpu criteria is
-		 * already satisfied
-		 */
-		if (i_cl->offlined_cpus == NULL)
-			return NOTIFY_OK;
-		if (i_cl->max_cpu_request <=
-					num_online_managed(i_cl->cpus)) {
-			pr_debug("msm_perf: Prevent CPU%d onlining\n", cpu);
-			cpumask_set_cpu(cpu, i_cl->offlined_cpus);
-			return NOTIFY_BAD;
-		}
-		cpumask_clear_cpu(cpu, i_cl->offlined_cpus);
-
-	} else if (action == CPU_DEAD) {
-		if (i_cl->offlined_cpus == NULL)
-			return NOTIFY_OK;
-		if (cpumask_test_cpu(cpu, i_cl->offlined_cpus))
-			return NOTIFY_OK;
-		/*
-		 * Schedule a re-evaluation to check if any more CPUs can be
-		 * brought online to meet the max_cpu_request requirement. This
-		 * work is delayed to account for CPU hotplug latencies
-		 */
-		if (schedule_delayed_work(&evaluate_hotplug_work, 0)) {
-			trace_reevaluate_hotplug(cpumask_bits(i_cl->cpus)[0],
-							i_cl->max_cpu_request);
-			pr_debug("msm_perf: Re-evaluation scheduled %d\n", cpu);
-		} else {
-			pr_debug("msm_perf: Work scheduling failed %d\n", cpu);
-		}
-	}
-
 	return NOTIFY_OK;
 }
 
@@ -2626,13 +2284,7 @@ static int init_cluster_control(void)
 			ret = -ENOMEM;
 			goto error;
 		}
-		if (!alloc_cpumask_var(&managed_clusters[i]->offlined_cpus,
-		     GFP_KERNEL)) {
-			ret = -ENOMEM;
-			goto error;
-		}
 
-		managed_clusters[i]->max_cpu_request = -1;
 		managed_clusters[i]->single_enter_load = DEF_SINGLE_ENT;
 		managed_clusters[i]->single_exit_load = DEF_SINGLE_EX;
 		managed_clusters[i]->single_enter_cycles
@@ -2669,7 +2321,6 @@ static int init_cluster_control(void)
 			perf_cl_peak_mod_exit_timer;
 	}
 
-	INIT_DELAYED_WORK(&evaluate_hotplug_work, check_cluster_status);
 	mutex_init(&managed_cpus_lock);
 
 	ip_evts = kcalloc(1, sizeof(struct input_events), GFP_KERNEL);
@@ -2707,8 +2358,6 @@ static int init_cluster_control(void)
 	for (i = 0; i < num_clusters; i++) {
 		if (!managed_clusters[i])
 			break;
-		if (managed_clusters[i]->offlined_cpus)
-			free_cpumask_var(managed_clusters[i]->offlined_cpus);
 		if (managed_clusters[i]->cpus)
 			free_cpumask_var(managed_clusters[i]->cpus);
 		kfree(managed_clusters[i]);
diff --git a/drivers/soc/qcom/smp2p_spinlock_test.c b/drivers/soc/qcom/smp2p_spinlock_test.c
deleted file mode 100644
index d1e5090..0000000
--- a/drivers/soc/qcom/smp2p_spinlock_test.c
+++ /dev/null
@@ -1,820 +0,0 @@
-/* drivers/soc/qcom/smp2p_spinlock_test.c
- *
- * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-#include <linux/debugfs.h>
-#include <linux/ctype.h>
-#include <linux/jiffies.h>
-#include <linux/delay.h>
-#include <linux/completion.h>
-#include <linux/module.h>
-#include <linux/remote_spinlock.h>
-#include <soc/qcom/smem.h>
-#include "smem_private.h"
-#include "smp2p_private.h"
-#include "smp2p_test_common.h"
-
-#define RS_END_THIEF_PID_BIT 20
-#define RS_END_THIEF_MASK 0x00f00000
-
-/* Spinlock commands used for testing Apps<->RPM spinlocks. */
-enum RPM_SPINLOCK_CMDS {
-	RPM_CMD_INVALID,
-	RPM_CMD_START,
-	RPM_CMD_LOCKED,
-	RPM_CMD_UNLOCKED,
-	RPM_CMD_END,
-};
-
-/* Shared structure for testing Apps<->RPM spinlocks. */
-struct rpm_spinlock_test {
-	uint32_t apps_cmd;
-	uint32_t apps_lock_count;
-	uint32_t rpm_cmd;
-	uint32_t rpm_lock_count;
-};
-
-static uint32_t ut_remote_spinlock_run_time = 1;
-
-/**
- * smp2p_ut_remote_spinlock_core - Verify remote spinlock.
- *
- * @s:           Pointer to output file
- * @remote_pid:  Remote processor to test
- * @use_trylock: Use trylock to prevent an Apps deadlock if the
- *               remote spinlock fails.
- */
-static void smp2p_ut_remote_spinlock_core(struct seq_file *s, int remote_pid,
-		bool use_trylock)
-{
-	int failed = 0;
-	unsigned int lock_count = 0;
-	struct msm_smp2p_out *handle = NULL;
-	int ret;
-	uint32_t test_request;
-	uint32_t test_response;
-	struct mock_cb_data cb_out;
-	struct mock_cb_data cb_in;
-	unsigned long flags;
-	unsigned int n;
-	bool have_lock;
-	bool timeout;
-	int failed_tmp;
-	int spinlock_owner;
-	remote_spinlock_t *smem_spinlock;
-	unsigned long end;
-
-	seq_printf(s, "Running %s for '%s' remote pid %d\n",
-		   __func__, smp2p_pid_to_name(remote_pid), remote_pid);
-
-	cb_out.initialized = false;
-	cb_in.initialized = false;
-	mock_cb_data_init(&cb_out);
-	mock_cb_data_init(&cb_in);
-	do {
-		smem_spinlock = smem_get_remote_spinlock();
-		UT_ASSERT_PTR(smem_spinlock, !=, NULL);
-
-		/* Open output entry */
-		ret = msm_smp2p_out_open(remote_pid, SMP2P_RLPB_ENTRY_NAME,
-			&cb_out.nb, &handle);
-		UT_ASSERT_INT(ret, ==, 0);
-		UT_ASSERT_INT(
-			(int)wait_for_completion_timeout(
-					&cb_out.cb_completion, HZ * 2),
-			>, 0);
-		UT_ASSERT_INT(cb_out.cb_count, ==, 1);
-		UT_ASSERT_INT(cb_out.event_open, ==, 1);
-
-		/* Open inbound entry */
-		ret = msm_smp2p_in_register(remote_pid, SMP2P_RLPB_ENTRY_NAME,
-				&cb_in.nb);
-		UT_ASSERT_INT(ret, ==, 0);
-		UT_ASSERT_INT(
-			(int)wait_for_completion_timeout(
-					&cb_in.cb_completion, HZ * 2),
-			>, 0);
-		UT_ASSERT_INT(cb_in.cb_count, ==, 1);
-		UT_ASSERT_INT(cb_in.event_open, ==, 1);
-
-		/* Send start */
-		mock_cb_data_reset(&cb_in);
-		mock_cb_data_reset(&cb_out);
-		test_request = 0x0;
-		SMP2P_SET_RMT_CMD_TYPE_REQ(test_request);
-		SMP2P_SET_RMT_CMD(test_request, SMP2P_LB_CMD_RSPIN_START);
-		SMP2P_SET_RMT_DATA(test_request, 0x0);
-		ret = msm_smp2p_out_write(handle, test_request);
-		UT_ASSERT_INT(ret, ==, 0);
-
-		UT_ASSERT_INT(
-			(int)wait_for_completion_timeout(
-					&cb_in.cb_completion, HZ * 2),
-			>, 0);
-		UT_ASSERT_INT(cb_in.cb_count, ==, 1);
-		UT_ASSERT_INT(cb_in.event_entry_update, ==, 1);
-		ret = msm_smp2p_in_read(remote_pid, SMP2P_RLPB_ENTRY_NAME,
-				&test_response);
-		UT_ASSERT_INT(ret, ==, 0);
-
-		test_response = SMP2P_GET_RMT_CMD(test_response);
-		if (test_response != SMP2P_LB_CMD_RSPIN_LOCKED &&
-				test_response != SMP2P_LB_CMD_RSPIN_UNLOCKED) {
-			/* invalid response from remote - abort test */
-			test_request = 0x0;
-			SMP2P_SET_RMT_CMD_TYPE(test_request, 1);
-			SMP2P_SET_RMT_CMD(test_request, SMP2P_LB_CMD_RSPIN_END);
-			SMP2P_SET_RMT_DATA(test_request, 0x0);
-			ret = msm_smp2p_out_write(handle, test_request);
-			UT_ASSERT_HEX(SMP2P_LB_CMD_RSPIN_LOCKED, ==,
-					test_response);
-		}
-
-		/* Run spinlock test */
-		if (use_trylock)
-			seq_puts(s, "\tUsing remote_spin_trylock\n");
-		else
-			seq_puts(s, "\tUsing remote_spin_lock\n");
-
-		flags = 0;
-		have_lock = false;
-		timeout = false;
-		spinlock_owner = 0;
-		test_request = 0x0;
-		SMP2P_SET_RMT_CMD_TYPE_REQ(test_request);
-		end = jiffies + (ut_remote_spinlock_run_time * HZ);
-		if (ut_remote_spinlock_run_time < 300) {
-			seq_printf(s, "\tRunning test for %u seconds; ",
-				ut_remote_spinlock_run_time);
-			seq_puts(s,
-				"on physical hardware please run >= 300 seconds by doing 'echo 300 >  ut_remote_spinlock_time'\n");
-		}
-		while (time_is_after_jiffies(end)) {
-			/* try to acquire spinlock */
-			if (use_trylock) {
-				unsigned long j_start = jiffies;
-
-				while (!remote_spin_trylock_irqsave(
-						smem_spinlock, flags)) {
-					if (jiffies_to_msecs(jiffies - j_start)
-							> 1000) {
-						seq_puts(s,
-							"\tFail: Timeout trying to get the lock\n");
-						timeout = true;
-						break;
-					}
-				}
-				if (timeout)
-					break;
-			} else {
-				remote_spin_lock_irqsave(smem_spinlock, flags);
-			}
-			have_lock = true;
-			++lock_count;
-
-			/* tell the remote side that we have the lock */
-			SMP2P_SET_RMT_DATA(test_request, lock_count);
-			SMP2P_SET_RMT_CMD(test_request,
-					SMP2P_LB_CMD_RSPIN_LOCKED);
-			ret = msm_smp2p_out_write(handle, test_request);
-			UT_ASSERT_INT(ret, ==, 0);
-
-			/* verify the other side doesn't say it has the lock */
-			for (n = 0; n < 1000; ++n) {
-				spinlock_owner =
-					remote_spin_owner(smem_spinlock);
-				if (spinlock_owner != SMEM_APPS) {
-					/* lock stolen by remote side */
-					seq_puts(s, "\tFail: Remote side: ");
-					seq_printf(s, "%d stole lock pid: %d\n",
-						remote_pid, spinlock_owner);
-					failed = true;
-					break;
-				}
-				spinlock_owner = 0;
-
-				ret = msm_smp2p_in_read(remote_pid,
-					SMP2P_RLPB_ENTRY_NAME, &test_response);
-				UT_ASSERT_INT(ret, ==, 0);
-				test_response =
-					SMP2P_GET_RMT_CMD(test_response);
-				UT_ASSERT_HEX(SMP2P_LB_CMD_RSPIN_UNLOCKED, ==,
-					test_response);
-			}
-			if (failed)
-				break;
-
-			/* tell remote side we are unlocked and release lock */
-			SMP2P_SET_RMT_CMD(test_request,
-					SMP2P_LB_CMD_RSPIN_UNLOCKED);
-			(void)msm_smp2p_out_write(handle, test_request);
-			have_lock = false;
-			remote_spin_unlock_irqrestore(smem_spinlock, flags);
-		}
-		if (have_lock)
-			remote_spin_unlock_irqrestore(smem_spinlock, flags);
-
-		/* End test */
-		mock_cb_data_reset(&cb_in);
-		SMP2P_SET_RMT_CMD(test_request, SMP2P_LB_CMD_RSPIN_END);
-		SMP2P_SET_RMT_DATA(test_request, lock_count |
-				(spinlock_owner << RS_END_THIEF_PID_BIT));
-		(void)msm_smp2p_out_write(handle, test_request);
-
-		failed_tmp = failed;
-		failed = false;
-		do {
-			UT_ASSERT_INT(
-				(int)wait_for_completion_timeout(
-					&cb_in.cb_completion, HZ * 2),
-				>, 0);
-			reinit_completion(&cb_in.cb_completion);
-			ret = msm_smp2p_in_read(remote_pid,
-					SMP2P_RLPB_ENTRY_NAME, &test_response);
-			UT_ASSERT_INT(ret, ==, 0);
-		} while (!failed &&
-			SMP2P_GET_RMT_CMD(test_response) !=
-			SMP2P_LB_CMD_RSPIN_END);
-		if (failed)
-			break;
-		failed = failed_tmp;
-
-		test_response = SMP2P_GET_RMT_DATA(test_response);
-		seq_puts(s, "\tLocked spinlock ");
-		seq_printf(s, "local %u times; remote %u times",
-			lock_count,
-			test_response & ((1 << RS_END_THIEF_PID_BIT) - 1)
-			);
-		if (test_response & RS_END_THIEF_MASK) {
-			seq_puts(s, "Remote side reporting lock stolen by ");
-			seq_printf(s, "pid %d.\n",
-				SMP2P_GET_BITS(test_response,
-					RS_END_THIEF_MASK,
-					RS_END_THIEF_PID_BIT));
-			failed = 1;
-		}
-		seq_puts(s, "\n");
-
-		/* Cleanup */
-		ret = msm_smp2p_out_close(&handle);
-		UT_ASSERT_INT(ret, ==, 0);
-		UT_ASSERT_PTR(handle, ==, NULL);
-		ret = msm_smp2p_in_unregister(remote_pid,
-				SMP2P_RLPB_ENTRY_NAME, &cb_in.nb);
-		UT_ASSERT_INT(ret, ==, 0);
-
-		if (!failed && !timeout)
-			seq_puts(s, "\tOK\n");
-	} while (0);
-
-	if (failed) {
-		if (handle) {
-			/* send end command */
-			test_request = 0;
-			SMP2P_SET_RMT_CMD(test_request, SMP2P_LB_CMD_RSPIN_END);
-			SMP2P_SET_RMT_DATA(test_request, lock_count);
-			(void)msm_smp2p_out_write(handle, test_request);
-			(void)msm_smp2p_out_close(&handle);
-		}
-		(void)msm_smp2p_in_unregister(remote_pid,
-				SMP2P_RLPB_ENTRY_NAME, &cb_in.nb);
-
-		pr_err("%s: Failed\n", __func__);
-		seq_puts(s, "\tFailed\n");
-	}
-}
-
-/**
- * smp2p_ut_remote_spinlock_pid - Verify remote spinlock for a processor.
- *
- * @s:           Pointer to output file
- * @pid:         Processor to test
- * @use_trylock: Use trylock to prevent an Apps deadlock if the
- *               remote spinlock fails.
- */
-static void smp2p_ut_remote_spinlock_pid(struct seq_file *s, int pid,
-		bool use_trylock)
-{
-	struct smp2p_interrupt_config *int_cfg;
-
-	int_cfg = smp2p_get_interrupt_config();
-	if (!int_cfg) {
-		seq_puts(s, "Remote processor config unavailable\n");
-		return;
-	}
-
-	if (pid >= SMP2P_NUM_PROCS || !int_cfg[pid].is_configured)
-		return;
-
-	msm_smp2p_deinit_rmt_lpb_proc(pid);
-	smp2p_ut_remote_spinlock_core(s, pid, use_trylock);
-	msm_smp2p_init_rmt_lpb_proc(pid);
-}
-
-/**
- * smp2p_ut_remote_spinlock - Verify remote spinlock for all processors.
- *
- * @s:   pointer to output file
- */
-static void smp2p_ut_remote_spinlock(struct seq_file *s)
-{
-	int pid;
-
-	for (pid = 0; pid < SMP2P_NUM_PROCS; ++pid)
-		smp2p_ut_remote_spinlock_pid(s, pid, false);
-}
-
-/**
- * smp2p_ut_remote_spin_trylock - Verify remote trylock for all processors.
- *
- * @s:   Pointer to output file
- */
-static void smp2p_ut_remote_spin_trylock(struct seq_file *s)
-{
-	int pid;
-
-	for (pid = 0; pid < SMP2P_NUM_PROCS; ++pid)
-		smp2p_ut_remote_spinlock_pid(s, pid, true);
-}
-
-/**
- * smp2p_ut_remote_spinlock - Verify remote spinlock for all processors.
- *
- * @s:   pointer to output file
- *
- * This test verifies inbound and outbound functionality for all
- * configured remote processor.
- */
-static void smp2p_ut_remote_spinlock_modem(struct seq_file *s)
-{
-	smp2p_ut_remote_spinlock_pid(s, SMP2P_MODEM_PROC, false);
-}
-
-static void smp2p_ut_remote_spinlock_adsp(struct seq_file *s)
-{
-	smp2p_ut_remote_spinlock_pid(s, SMP2P_AUDIO_PROC, false);
-}
-
-static void smp2p_ut_remote_spinlock_dsps(struct seq_file *s)
-{
-	smp2p_ut_remote_spinlock_pid(s, SMP2P_SENSOR_PROC, false);
-}
-
-static void smp2p_ut_remote_spinlock_wcnss(struct seq_file *s)
-{
-	smp2p_ut_remote_spinlock_pid(s, SMP2P_WIRELESS_PROC, false);
-}
-
-static void smp2p_ut_remote_spinlock_cdsp(struct seq_file *s)
-{
-	smp2p_ut_remote_spinlock_pid(s, SMP2P_CDSP_PROC, false);
-}
-
-static void smp2p_ut_remote_spinlock_tz(struct seq_file *s)
-{
-	smp2p_ut_remote_spinlock_pid(s, SMP2P_TZ_PROC, false);
-}
-
-/**
- * smp2p_ut_remote_spinlock_rpm - Verify remote spinlock.
- *
- * @s:   pointer to output file
- * @remote_pid:  Remote processor to test
- */
-static void smp2p_ut_remote_spinlock_rpm(struct seq_file *s)
-{
-	int failed = 0;
-	unsigned long flags;
-	unsigned int n;
-	unsigned int test_num;
-	struct rpm_spinlock_test *data_ptr;
-	remote_spinlock_t *smem_spinlock;
-	bool have_lock;
-
-	seq_printf(s, "Running %s for Apps<->RPM Test\n",
-		   __func__);
-	do {
-		smem_spinlock = smem_get_remote_spinlock();
-		UT_ASSERT_PTR(smem_spinlock, !=, NULL);
-
-		data_ptr = smem_alloc(SMEM_ID_VENDOR0,
-				sizeof(struct rpm_spinlock_test), 0,
-				SMEM_ANY_HOST_FLAG);
-		UT_ASSERT_PTR(0, !=, data_ptr);
-
-		/* Send start */
-		writel_relaxed(0, &data_ptr->apps_lock_count);
-		writel_relaxed(RPM_CMD_START, &data_ptr->apps_cmd);
-
-		seq_puts(s, "\tWaiting for RPM to start test\n");
-		for (n = 0; n < 1000; ++n) {
-			if (readl_relaxed(&data_ptr->rpm_cmd) !=
-					RPM_CMD_INVALID)
-				break;
-			usleep_range(1000, 1200);
-		}
-		if (readl_relaxed(&data_ptr->rpm_cmd) == RPM_CMD_INVALID) {
-			/* timeout waiting for RPM */
-			writel_relaxed(RPM_CMD_INVALID, &data_ptr->apps_cmd);
-			UT_ASSERT_INT(RPM_CMD_LOCKED, !=, RPM_CMD_INVALID);
-		}
-
-		/* Run spinlock test */
-		flags = 0;
-		have_lock = false;
-		for (test_num = 0; !failed && test_num < 10000; ++test_num) {
-			/* acquire spinlock */
-			remote_spin_lock_irqsave(smem_spinlock, flags);
-			have_lock = true;
-			data_ptr->apps_lock_count++;
-			writel_relaxed(data_ptr->apps_lock_count,
-				&data_ptr->apps_lock_count);
-			writel_relaxed(RPM_CMD_LOCKED, &data_ptr->apps_cmd);
-			/*
-			 * Ensure that the remote side sees our lock has
-			 * been acquired before we start polling their status.
-			 */
-			wmb();
-
-			/* verify the other side doesn't say it has the lock */
-			for (n = 0; n < 1000; ++n) {
-				UT_ASSERT_HEX(RPM_CMD_UNLOCKED, ==,
-					readl_relaxed(&data_ptr->rpm_cmd));
-			}
-			if (failed)
-				break;
-
-			/* release spinlock */
-			have_lock = false;
-			writel_relaxed(RPM_CMD_UNLOCKED, &data_ptr->apps_cmd);
-			/*
-			 * Ensure that our status-update write was committed
-			 * before we unlock the spinlock.
-			 */
-			wmb();
-			remote_spin_unlock_irqrestore(smem_spinlock, flags);
-		}
-		if (have_lock)
-			remote_spin_unlock_irqrestore(smem_spinlock, flags);
-
-		/* End test */
-		writel_relaxed(RPM_CMD_INVALID, &data_ptr->apps_cmd);
-		seq_printf(s, "\tLocked spinlock local %u remote %u\n",
-				readl_relaxed(&data_ptr->apps_lock_count),
-				readl_relaxed(&data_ptr->rpm_lock_count));
-
-		if (!failed)
-			seq_puts(s, "\tOK\n");
-	} while (0);
-
-	if (failed) {
-		pr_err("%s: Failed\n", __func__);
-		seq_puts(s, "\tFailed\n");
-	}
-}
-
-struct rmt_spinlock_work_item {
-	struct work_struct work;
-	struct completion try_lock;
-	struct completion locked;
-	bool has_locked;
-};
-
-static void ut_remote_spinlock_ssr_worker(struct work_struct *work)
-{
-	remote_spinlock_t *smem_spinlock;
-	unsigned long flags;
-	struct rmt_spinlock_work_item *work_item =
-		container_of(work, struct rmt_spinlock_work_item, work);
-
-	work_item->has_locked = false;
-	complete(&work_item->try_lock);
-	smem_spinlock = smem_get_remote_spinlock();
-	if (!smem_spinlock) {
-		pr_err("%s Failed\n", __func__);
-		return;
-	}
-
-	remote_spin_lock_irqsave(smem_spinlock, flags);
-	remote_spin_unlock_irqrestore(smem_spinlock, flags);
-	work_item->has_locked = true;
-	complete(&work_item->locked);
-}
-
-/**
- * smp2p_ut_remote_spinlock_ssr - Verify remote spinlock.
- *
- * @s:   pointer to output file
- */
-static void smp2p_ut_remote_spinlock_ssr(struct seq_file *s)
-{
-	int failed = 0;
-	unsigned long flags;
-	remote_spinlock_t *smem_spinlock;
-	int spinlock_owner = 0;
-
-	struct workqueue_struct *ws = NULL;
-	struct rmt_spinlock_work_item work_item = { .has_locked = false };
-
-	seq_printf(s, " Running %s Test\n",
-		   __func__);
-	do {
-		smem_spinlock = smem_get_remote_spinlock();
-		UT_ASSERT_PTR(smem_spinlock, !=, NULL);
-
-		ws = create_singlethread_workqueue("ut_remote_spinlock_ssr");
-		UT_ASSERT_PTR(ws, !=, NULL);
-		INIT_WORK(&work_item.work, ut_remote_spinlock_ssr_worker);
-		init_completion(&work_item.try_lock);
-		init_completion(&work_item.locked);
-
-		remote_spin_lock_irqsave(smem_spinlock, flags);
-		/* Unlock local spin lock and hold HW spinlock */
-		spin_unlock_irqrestore(&((smem_spinlock)->local), flags);
-
-		queue_work(ws, &work_item.work);
-		UT_ASSERT_INT(
-			(int)wait_for_completion_timeout(
-					&work_item.try_lock, HZ * 2), >, 0);
-		UT_ASSERT_INT((int)work_item.has_locked, ==, 0);
-		spinlock_owner = remote_spin_owner(smem_spinlock);
-		UT_ASSERT_INT(spinlock_owner, ==, SMEM_APPS);
-		remote_spin_release_all(SMEM_APPS);
-
-		UT_ASSERT_INT(
-			(int)wait_for_completion_timeout(
-					&work_item.locked, HZ * 2), >, 0);
-
-		if (!failed)
-			seq_puts(s, "\tOK\n");
-	} while (0);
-
-	if (failed) {
-		pr_err("%s: Failed\n", __func__);
-		seq_puts(s, "\tFailed\n");
-	}
-}
-
-/**
- * smp2p_ut_remote_spinlock_track_core - Verify remote spinlock.
- *
- * @s:           Pointer to output file
- * @remote_pid:  Remote processor to test
- *
- * This test has the remote subsystem grab the lock, and then has the local
- * subsystem attempt to grab the lock using the trylock() API. It then verifies
- * that the ID in the hw_spinlocks array matches the owner of the lock.
- */
-static void smp2p_ut_remote_spinlock_track_core(struct seq_file *s,
-		int remote_pid)
-{
-	int failed = 0;
-	struct msm_smp2p_out *handle = NULL;
-	int ret;
-	uint32_t test_request;
-	uint32_t test_response;
-	struct mock_cb_data cb_out;
-	struct mock_cb_data cb_in;
-	unsigned long flags;
-	int stored_value;
-	remote_spinlock_t *smem_spinlock;
-
-	seq_printf(s, "Running %s for '%s' remote pid %d\n",
-		   __func__, smp2p_pid_to_name(remote_pid), remote_pid);
-
-	cb_out.initialized = false;
-	cb_in.initialized = false;
-	mock_cb_data_init(&cb_out);
-	mock_cb_data_init(&cb_in);
-	do {
-		smem_spinlock = smem_get_remote_spinlock();
-		UT_ASSERT_PTR(smem_spinlock, !=, NULL);
-
-		/* Open output entry */
-		ret = msm_smp2p_out_open(remote_pid, SMP2P_RLPB_ENTRY_NAME,
-			&cb_out.nb, &handle);
-		UT_ASSERT_INT(ret, ==, 0);
-		UT_ASSERT_INT(
-			(int)wait_for_completion_timeout(
-					&cb_out.cb_completion, HZ * 2),
-			>, 0);
-		UT_ASSERT_INT(cb_out.cb_count, ==, 1);
-		UT_ASSERT_INT(cb_out.event_open, ==, 1);
-
-		/* Open inbound entry */
-		ret = msm_smp2p_in_register(remote_pid, SMP2P_RLPB_ENTRY_NAME,
-				&cb_in.nb);
-		UT_ASSERT_INT(ret, ==, 0);
-		UT_ASSERT_INT(
-			(int)wait_for_completion_timeout(
-					&cb_in.cb_completion, HZ * 2),
-			>, 0);
-		UT_ASSERT_INT(cb_in.cb_count, ==, 1);
-		UT_ASSERT_INT(cb_in.event_open, ==, 1);
-
-		/* Send start */
-		mock_cb_data_reset(&cb_in);
-		mock_cb_data_reset(&cb_out);
-		test_request = 0x0;
-		SMP2P_SET_RMT_CMD_TYPE_REQ(test_request);
-		SMP2P_SET_RMT_CMD(test_request, SMP2P_LB_CMD_RSPIN_START);
-		SMP2P_SET_RMT_DATA(test_request, 0x0);
-		ret = msm_smp2p_out_write(handle, test_request);
-		UT_ASSERT_INT(ret, ==, 0);
-
-		UT_ASSERT_INT(
-			(int)wait_for_completion_timeout(
-					&cb_in.cb_completion, HZ * 2),
-			>, 0);
-		UT_ASSERT_INT(cb_in.cb_count, ==, 1);
-		UT_ASSERT_INT(cb_in.event_entry_update, ==, 1);
-		ret = msm_smp2p_in_read(remote_pid, SMP2P_RLPB_ENTRY_NAME,
-				&test_response);
-		UT_ASSERT_INT(ret, ==, 0);
-
-		test_response = SMP2P_GET_RMT_CMD(test_response);
-		if (test_response != SMP2P_LB_CMD_RSPIN_LOCKED &&
-				test_response != SMP2P_LB_CMD_RSPIN_UNLOCKED) {
-			/* invalid response from remote - abort test */
-			test_request = 0x0;
-			SMP2P_SET_RMT_CMD_TYPE(test_request, 1);
-			SMP2P_SET_RMT_CMD(test_request, SMP2P_LB_CMD_RSPIN_END);
-			SMP2P_SET_RMT_DATA(test_request, 0x0);
-			ret = msm_smp2p_out_write(handle, test_request);
-			UT_ASSERT_HEX(SMP2P_LB_CMD_RSPIN_LOCKED, ==,
-					test_response);
-		}
-
-		/* Run spinlock test */
-		flags = 0;
-		test_request = 0x0;
-		SMP2P_SET_RMT_CMD_TYPE_REQ(test_request);
-
-		/* try to acquire spinlock */
-		remote_spin_trylock_irqsave(smem_spinlock, flags);
-		/*
-		 * Need to check against the locking token (PID + 1)
-		 * because the remote_spin_owner() API only returns the
-		 * PID.
-		 */
-		stored_value = remote_spin_get_hw_spinlocks_element(
-				smem_spinlock);
-		UT_ASSERT_INT(stored_value, ==,
-			remote_spin_owner(smem_spinlock) + 1);
-		UT_ASSERT_INT(stored_value, ==, remote_pid + 1);
-
-		/* End test */
-		test_request = 0x0;
-		SMP2P_SET_RMT_CMD(test_request, SMP2P_LB_CMD_RSPIN_END);
-		SMP2P_SET_RMT_DATA(test_request, 0x0);
-		(void)msm_smp2p_out_write(handle, test_request);
-
-		/* Cleanup */
-		ret = msm_smp2p_out_close(&handle);
-		UT_ASSERT_INT(ret, ==, 0);
-		UT_ASSERT_PTR(handle, ==, NULL);
-		ret = msm_smp2p_in_unregister(remote_pid,
-				SMP2P_RLPB_ENTRY_NAME, &cb_in.nb);
-		UT_ASSERT_INT(ret, ==, 0);
-
-		if (!failed)
-			seq_puts(s, "\tOK\n");
-	} while (0);
-
-	if (failed) {
-		if (handle) {
-			/* send end command */
-			test_request = 0x0;
-			SMP2P_SET_RMT_CMD(test_request, SMP2P_LB_CMD_RSPIN_END);
-			SMP2P_SET_RMT_DATA(test_request, 0x0);
-			(void)msm_smp2p_out_write(handle, test_request);
-			(void)msm_smp2p_out_close(&handle);
-		}
-		(void)msm_smp2p_in_unregister(remote_pid,
-				SMP2P_RLPB_ENTRY_NAME, &cb_in.nb);
-
-		pr_err("%s: Failed\n", __func__);
-		seq_puts(s, "\tFailed\n");
-	}
-}
-
-/**
- * smp2p_ut_remote_spinlock_track - Verify PID tracking for modem.
- *
- * @s:	Pointer to output file
- * @pid:		The processor to test
- */
-static void smp2p_ut_remote_spinlock_track(struct seq_file *s, int pid)
-{
-	struct smp2p_interrupt_config *int_cfg;
-
-	int_cfg = smp2p_get_interrupt_config();
-	if (!int_cfg) {
-		seq_puts(s, "Remote processor config unavailable\n");
-		return;
-	}
-
-	if (pid >= SMP2P_NUM_PROCS || !int_cfg[pid].is_configured)
-		return;
-
-	msm_smp2p_deinit_rmt_lpb_proc(pid);
-	smp2p_ut_remote_spinlock_track_core(s, pid);
-	msm_smp2p_init_rmt_lpb_proc(pid);
-}
-
-/**
- * smp2p_ut_remote_spinlock_track - Verify PID tracking for all processors.
- *
- * @s:	Pointer to output file
- *
- * This test verifies PID tracking for all configured remote processors.
- */
-static void smp2p_ut_remote_spinlock_track_modem(struct seq_file *s)
-{
-	smp2p_ut_remote_spinlock_track(s, SMP2P_MODEM_PROC);
-}
-
-static void smp2p_ut_remote_spinlock_track_adsp(struct seq_file *s)
-{
-	smp2p_ut_remote_spinlock_track(s, SMP2P_AUDIO_PROC);
-}
-
-static void smp2p_ut_remote_spinlock_track_dsps(struct seq_file *s)
-{
-	smp2p_ut_remote_spinlock_track(s, SMP2P_SENSOR_PROC);
-}
-
-static void smp2p_ut_remote_spinlock_track_wcnss(struct seq_file *s)
-{
-	smp2p_ut_remote_spinlock_track(s, SMP2P_WIRELESS_PROC);
-}
-
-static void smp2p_ut_remote_spinlock_track_cdsp(struct seq_file *s)
-{
-	smp2p_ut_remote_spinlock_track(s, SMP2P_CDSP_PROC);
-}
-
-static void smp2p_ut_remote_spinlock_track_tz(struct seq_file *s)
-{
-	smp2p_ut_remote_spinlock_track(s, SMP2P_TZ_PROC);
-}
-
-static int __init smp2p_debugfs_init(void)
-{
-	/*
-	 * Add Unit Test entries.
-	 *
-	 * The idea with unit tests is that you can run all of them
-	 * from ADB shell by doing:
-	 *  adb shell
-	 *  cat ut*
-	 *
-	 * And if particular tests fail, you can then repeatedly run the
-	 * failing tests as you debug and resolve the failing test.
-	 */
-	smp2p_debug_create("ut_remote_spinlock",
-		smp2p_ut_remote_spinlock);
-	smp2p_debug_create("ut_remote_spin_trylock",
-		smp2p_ut_remote_spin_trylock);
-	smp2p_debug_create("ut_remote_spinlock_modem",
-		smp2p_ut_remote_spinlock_modem);
-	smp2p_debug_create("ut_remote_spinlock_adsp",
-		smp2p_ut_remote_spinlock_adsp);
-	smp2p_debug_create("ut_remote_spinlock_dsps",
-		smp2p_ut_remote_spinlock_dsps);
-	smp2p_debug_create("ut_remote_spinlock_wcnss",
-		smp2p_ut_remote_spinlock_wcnss);
-	smp2p_debug_create("ut_remote_spinlock_cdsp",
-		smp2p_ut_remote_spinlock_cdsp);
-	smp2p_debug_create("ut_remote_spinlock_tz",
-		smp2p_ut_remote_spinlock_tz);
-	smp2p_debug_create("ut_remote_spinlock_rpm",
-		smp2p_ut_remote_spinlock_rpm);
-	smp2p_debug_create_u32("ut_remote_spinlock_time",
-		&ut_remote_spinlock_run_time);
-	smp2p_debug_create("ut_remote_spinlock_ssr",
-		&smp2p_ut_remote_spinlock_ssr);
-	smp2p_debug_create("ut_remote_spinlock_track_modem",
-		&smp2p_ut_remote_spinlock_track_modem);
-	smp2p_debug_create("ut_remote_spinlock_track_adsp",
-		&smp2p_ut_remote_spinlock_track_adsp);
-	smp2p_debug_create("ut_remote_spinlock_track_dsps",
-		&smp2p_ut_remote_spinlock_track_dsps);
-	smp2p_debug_create("ut_remote_spinlock_track_wcnss",
-		&smp2p_ut_remote_spinlock_track_wcnss);
-	smp2p_debug_create("ut_remote_spinlock_track_cdsp",
-		&smp2p_ut_remote_spinlock_track_cdsp);
-	smp2p_debug_create("ut_remote_spinlock_track_tz",
-		&smp2p_ut_remote_spinlock_track_tz);
-	return 0;
-}
-module_init(smp2p_debugfs_init);
diff --git a/drivers/soc/qcom/smp2p_test.c b/drivers/soc/qcom/smp2p_test.c
deleted file mode 100644
index aa8d0c8..0000000
--- a/drivers/soc/qcom/smp2p_test.c
+++ /dev/null
@@ -1,1324 +0,0 @@
-/* drivers/soc/qcom/smp2p_test.c
- *
- * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-#include <linux/debugfs.h>
-#include <linux/ctype.h>
-#include <linux/jiffies.h>
-#include <linux/delay.h>
-#include <linux/completion.h>
-#include <linux/module.h>
-#include <soc/qcom/subsystem_restart.h>
-#include "smp2p_private.h"
-#include "smp2p_test_common.h"
-
-/**
- * smp2p_ut_local_basic - Basic sanity test using local loopback.
- *
- * @s: pointer to output file
- *
- * This test simulates a simple write and read
- * when remote processor does not exist.
- */
-static void smp2p_ut_local_basic(struct seq_file *s)
-{
-	int failed = 0;
-	struct msm_smp2p_out *smp2p_obj;
-	struct msm_smp2p_remote_mock *rmp = NULL;
-	int ret;
-	uint32_t test_request;
-	uint32_t test_response = 0;
-	static struct mock_cb_data cb_data;
-
-	seq_printf(s, "Running %s\n", __func__);
-	mock_cb_data_init(&cb_data);
-	do {
-		/* initialize mock edge and start opening */
-		ret = smp2p_reset_mock_edge();
-		UT_ASSERT_INT(ret, ==, 0);
-
-		rmp = msm_smp2p_get_remote_mock();
-		UT_ASSERT_PTR(rmp, !=, NULL);
-
-		rmp->rx_interrupt_count = 0;
-		memset(&rmp->remote_item, 0,
-			sizeof(struct smp2p_smem_item));
-
-		msm_smp2p_set_remote_mock_exists(false);
-
-		ret = msm_smp2p_out_open(SMP2P_REMOTE_MOCK_PROC, "smp2p",
-			&cb_data.nb, &smp2p_obj);
-		UT_ASSERT_INT(ret, ==, 0);
-
-		UT_ASSERT_INT(rmp->rx_interrupt_count, ==, 1);
-		UT_ASSERT_INT(cb_data.cb_count, ==, 0);
-		rmp->rx_interrupt_count = 0;
-
-		/* simulate response from remote side */
-		rmp->remote_item.header.magic = SMP2P_MAGIC;
-		SMP2P_SET_LOCAL_PID(
-		rmp->remote_item.header.rem_loc_proc_id,
-					SMP2P_REMOTE_MOCK_PROC);
-		SMP2P_SET_REMOTE_PID(
-		rmp->remote_item.header.rem_loc_proc_id,
-					SMP2P_APPS_PROC);
-		SMP2P_SET_VERSION(
-		rmp->remote_item.header.feature_version, 1);
-		SMP2P_SET_FEATURES(
-		rmp->remote_item.header.feature_version, 0);
-		SMP2P_SET_ENT_TOTAL(
-		rmp->remote_item.header.valid_total_ent, SMP2P_MAX_ENTRY);
-		SMP2P_SET_ENT_VALID(
-		rmp->remote_item.header.valid_total_ent, 0);
-		rmp->remote_item.header.flags = 0x0;
-		msm_smp2p_set_remote_mock_exists(true);
-		rmp->tx_interrupt();
-
-		/* verify port was opened */
-		UT_ASSERT_INT(
-			(int)wait_for_completion_timeout(
-					&cb_data.cb_completion, HZ / 2), >, 0);
-		UT_ASSERT_INT(cb_data.cb_count, ==, 1);
-		UT_ASSERT_INT(cb_data.event_open, ==, 1);
-		UT_ASSERT_INT(rmp->rx_interrupt_count, ==, 2);
-
-		/* do write (test outbound entries) */
-		rmp->rx_interrupt_count = 0;
-		test_request = 0xC0DE;
-		ret = msm_smp2p_out_write(smp2p_obj, test_request);
-		UT_ASSERT_INT(ret, ==, 0);
-		UT_ASSERT_INT(rmp->rx_interrupt_count, ==, 1);
-
-		/* do read (test inbound entries) */
-		ret = msm_smp2p_out_read(smp2p_obj, &test_response);
-		UT_ASSERT_INT(ret, ==, 0);
-		UT_ASSERT_INT(test_request, ==, test_response);
-
-		ret = msm_smp2p_out_close(&smp2p_obj);
-		UT_ASSERT_INT(ret, ==, 0);
-		UT_ASSERT_PTR(smp2p_obj, ==, 0);
-
-		seq_puts(s, "\tOK\n");
-	} while (0);
-
-	if (failed) {
-		pr_err("%s: Failed\n", __func__);
-		seq_puts(s, "\tFailed\n");
-		(void)msm_smp2p_out_close(&smp2p_obj);
-	}
-}
-
-/**
- * smp2p_ut_local_late_open - Verify post-negotiation opening.
- *
- * @s: pointer to output file
- *
- * Verify entry creation for opening entries after negotiation is complete.
- */
-static void smp2p_ut_local_late_open(struct seq_file *s)
-{
-	int failed = 0;
-	struct msm_smp2p_out *smp2p_obj;
-	struct msm_smp2p_remote_mock *rmp = NULL;
-	int ret;
-	uint32_t test_request;
-	uint32_t test_response = 0;
-	static struct mock_cb_data cb_data;
-
-	seq_printf(s, "Running %s\n", __func__);
-	mock_cb_data_init(&cb_data);
-	do {
-		/* initialize mock edge */
-		ret = smp2p_reset_mock_edge();
-		UT_ASSERT_INT(ret, ==, 0);
-
-		rmp = msm_smp2p_get_remote_mock();
-		UT_ASSERT_PTR(rmp, !=, NULL);
-
-		rmp->rx_interrupt_count = 0;
-		memset(&rmp->remote_item, 0,
-			sizeof(struct smp2p_smem_item));
-		rmp->remote_item.header.magic = SMP2P_MAGIC;
-		SMP2P_SET_LOCAL_PID(
-		rmp->remote_item.header.rem_loc_proc_id,
-						SMP2P_REMOTE_MOCK_PROC);
-		SMP2P_SET_REMOTE_PID(
-		rmp->remote_item.header.rem_loc_proc_id,
-						SMP2P_APPS_PROC);
-		SMP2P_SET_VERSION(
-			rmp->remote_item.header.feature_version, 1);
-		SMP2P_SET_FEATURES(
-			rmp->remote_item.header.feature_version, 0);
-		SMP2P_SET_ENT_TOTAL(
-			rmp->remote_item.header.valid_total_ent,
-			SMP2P_MAX_ENTRY);
-		SMP2P_SET_ENT_VALID(
-		rmp->remote_item.header.valid_total_ent, 0);
-		rmp->remote_item.header.flags = 0x0;
-
-		msm_smp2p_set_remote_mock_exists(true);
-
-		ret = msm_smp2p_out_open(SMP2P_REMOTE_MOCK_PROC, "smp2p",
-			&cb_data.nb, &smp2p_obj);
-		UT_ASSERT_INT(ret, ==, 0);
-
-		/* verify port was opened */
-		UT_ASSERT_INT(
-			(int)wait_for_completion_timeout(
-					&cb_data.cb_completion, HZ / 2),
-			>, 0);
-		UT_ASSERT_INT(cb_data.cb_count, ==, 1);
-		UT_ASSERT_INT(cb_data.event_open, ==, 1);
-		UT_ASSERT_INT(rmp->rx_interrupt_count, ==, 2);
-
-		/* do write (test outbound entries) */
-		rmp->rx_interrupt_count = 0;
-		test_request = 0xC0DE;
-		ret = msm_smp2p_out_write(smp2p_obj, test_request);
-		UT_ASSERT_INT(ret, ==, 0);
-		UT_ASSERT_INT(rmp->rx_interrupt_count, ==, 1);
-
-		/* do read (test inbound entries) */
-		ret = msm_smp2p_out_read(smp2p_obj, &test_response);
-		UT_ASSERT_INT(ret, ==, 0);
-		UT_ASSERT_INT(test_request, ==, test_response);
-
-		ret = msm_smp2p_out_close(&smp2p_obj);
-		UT_ASSERT_INT(ret, ==, 0);
-		UT_ASSERT_PTR(smp2p_obj, ==, 0);
-
-		seq_puts(s, "\tOK\n");
-	} while (0);
-
-	if (failed) {
-		pr_err("%s: Failed\n", __func__);
-		seq_puts(s, "\tFailed\n");
-		(void)msm_smp2p_out_close(&smp2p_obj);
-	}
-}
-
-/**
- * smp2p_ut_local_early_open - Verify pre-negotiation opening.
- *
- * @s: pointer to output file
- *
- * Verify entry creation for opening entries before negotiation is complete.
- */
-static void smp2p_ut_local_early_open(struct seq_file *s)
-{
-	int failed = 0;
-	struct msm_smp2p_out *smp2p_obj;
-	struct msm_smp2p_remote_mock *rmp = NULL;
-	struct smp2p_smem *outbound_item;
-	int negotiation_state;
-	int ret;
-	uint32_t test_request;
-	uint32_t test_response = 0;
-	static struct mock_cb_data cb_data;
-
-	seq_printf(s, "Running %s\n", __func__);
-	mock_cb_data_init(&cb_data);
-	do {
-		/* initialize mock edge, but don't enable, yet */
-		ret = smp2p_reset_mock_edge();
-		UT_ASSERT_INT(ret, ==, 0);
-
-		rmp = msm_smp2p_get_remote_mock();
-		UT_ASSERT_PTR(rmp, !=, NULL);
-
-		rmp->rx_interrupt_count = 0;
-		memset(&rmp->remote_item, 0,
-			sizeof(struct smp2p_smem_item));
-		rmp->remote_item.header.magic = SMP2P_MAGIC;
-		SMP2P_SET_LOCAL_PID(
-		rmp->remote_item.header.rem_loc_proc_id,
-						SMP2P_REMOTE_MOCK_PROC);
-		SMP2P_SET_REMOTE_PID(
-		rmp->remote_item.header.rem_loc_proc_id,
-						SMP2P_APPS_PROC);
-		SMP2P_SET_VERSION(
-		rmp->remote_item.header.feature_version, 1);
-		SMP2P_SET_FEATURES(
-		rmp->remote_item.header.feature_version, 0);
-		SMP2P_SET_ENT_TOTAL(
-		rmp->remote_item.header.valid_total_ent, SMP2P_MAX_ENTRY);
-		SMP2P_SET_ENT_VALID(
-		rmp->remote_item.header.valid_total_ent, 0);
-		rmp->remote_item.header.flags = 0x0;
-
-		msm_smp2p_set_remote_mock_exists(false);
-		UT_ASSERT_PTR(NULL, ==,
-				smp2p_get_in_item(SMP2P_REMOTE_MOCK_PROC));
-
-		/* initiate open, but verify it doesn't complete */
-		ret = msm_smp2p_out_open(SMP2P_REMOTE_MOCK_PROC, "smp2p",
-			&cb_data.nb, &smp2p_obj);
-		UT_ASSERT_INT(ret, ==, 0);
-
-		UT_ASSERT_INT(
-			(int)wait_for_completion_timeout(
-					&cb_data.cb_completion, HZ / 8),
-			==, 0);
-		UT_ASSERT_INT(cb_data.cb_count, ==, 0);
-		UT_ASSERT_INT(cb_data.event_open, ==, 0);
-		UT_ASSERT_INT(rmp->rx_interrupt_count, ==, 1);
-
-		outbound_item = smp2p_get_out_item(SMP2P_REMOTE_MOCK_PROC,
-				&negotiation_state);
-		UT_ASSERT_PTR(outbound_item, !=, NULL);
-		UT_ASSERT_INT(negotiation_state, ==, SMP2P_EDGE_STATE_OPENING);
-		UT_ASSERT_INT(0, ==,
-			SMP2P_GET_ENT_VALID(outbound_item->valid_total_ent));
-
-		/* verify that read/write don't work yet */
-		rmp->rx_interrupt_count = 0;
-		test_request = 0x0;
-		ret = msm_smp2p_out_write(smp2p_obj, test_request);
-		UT_ASSERT_INT(ret, ==, -ENODEV);
-		UT_ASSERT_INT(rmp->rx_interrupt_count, ==, 0);
-
-		ret = msm_smp2p_out_read(smp2p_obj, &test_response);
-		UT_ASSERT_INT(ret, ==, -ENODEV);
-
-		/* allocate remote entry and verify open */
-		msm_smp2p_set_remote_mock_exists(true);
-		rmp->tx_interrupt();
-
-		UT_ASSERT_INT(
-			(int)wait_for_completion_timeout(
-					&cb_data.cb_completion, HZ / 2),
-			>, 0);
-		UT_ASSERT_INT(cb_data.cb_count, ==, 1);
-		UT_ASSERT_INT(cb_data.event_open, ==, 1);
-		UT_ASSERT_INT(rmp->rx_interrupt_count, ==, 2);
-
-		/* do write (test outbound entries) */
-		rmp->rx_interrupt_count = 0;
-		test_request = 0xC0DE;
-		ret = msm_smp2p_out_write(smp2p_obj, test_request);
-		UT_ASSERT_INT(ret, ==, 0);
-		UT_ASSERT_INT(rmp->rx_interrupt_count, ==, 1);
-
-		/* do read (test inbound entries) */
-		ret = msm_smp2p_out_read(smp2p_obj, &test_response);
-		UT_ASSERT_INT(ret, ==, 0);
-		UT_ASSERT_INT(test_request, ==, test_response);
-
-		ret = msm_smp2p_out_close(&smp2p_obj);
-		UT_ASSERT_INT(ret, ==, 0);
-		UT_ASSERT_PTR(smp2p_obj, ==, 0);
-
-		seq_puts(s, "\tOK\n");
-	} while (0);
-
-	if (failed) {
-		pr_err("%s: Failed\n", __func__);
-		seq_puts(s, "\tFailed\n");
-		(void)msm_smp2p_out_close(&smp2p_obj);
-	}
-}
-
-/**
- * smp2p_ut_mock_loopback - Exercise the remote loopback using remote mock.
- *
- * @s: pointer to output file
- *
- * This test exercises the remote loopback code using
- * remote mock object. The remote mock object simulates the remote
- * processor sending remote loopback commands to the local processor.
- */
-static void smp2p_ut_mock_loopback(struct seq_file *s)
-{
-	int failed = 0;
-	struct msm_smp2p_remote_mock *rmp = NULL;
-	int ret;
-	uint32_t test_request = 0;
-	uint32_t test_response = 0;
-	struct msm_smp2p_out  *local;
-
-	seq_printf(s, "Running %s\n", __func__);
-	do {
-		/* Initialize the mock edge */
-		ret = smp2p_reset_mock_edge();
-		UT_ASSERT_INT(ret, ==, 0);
-
-		rmp = msm_smp2p_get_remote_mock();
-		UT_ASSERT_PTR(rmp, !=, NULL);
-
-		memset(&rmp->remote_item, 0,
-			sizeof(struct smp2p_smem_item));
-		rmp->remote_item.header.magic = SMP2P_MAGIC;
-		SMP2P_SET_LOCAL_PID(
-		rmp->remote_item.header.rem_loc_proc_id,
-						SMP2P_REMOTE_MOCK_PROC);
-		SMP2P_SET_REMOTE_PID(
-		rmp->remote_item.header.rem_loc_proc_id,
-						SMP2P_APPS_PROC);
-		SMP2P_SET_VERSION(
-		rmp->remote_item.header.feature_version, 1);
-		SMP2P_SET_FEATURES(
-		rmp->remote_item.header.feature_version, 0);
-		SMP2P_SET_ENT_TOTAL(
-		rmp->remote_item.header.valid_total_ent, SMP2P_MAX_ENTRY);
-		SMP2P_SET_ENT_VALID(
-		rmp->remote_item.header.valid_total_ent, 1);
-		rmp->remote_item.header.flags = 0x0;
-		msm_smp2p_set_remote_mock_exists(true);
-
-		/* Create test entry and attach loopback server */
-		rmp->rx_interrupt_count = 0;
-		reinit_completion(&rmp->cb_completion);
-		strlcpy(rmp->remote_item.entries[0].name, "smp2p",
-							SMP2P_MAX_ENTRY_NAME);
-		rmp->remote_item.entries[0].entry = 0;
-		rmp->tx_interrupt();
-
-		local = msm_smp2p_init_rmt_lpb_proc(SMP2P_REMOTE_MOCK_PROC);
-		UT_ASSERT_INT(
-			(int)wait_for_completion_timeout(
-					&rmp->cb_completion, HZ / 2),
-			>, 0);
-		UT_ASSERT_INT(rmp->rx_interrupt_count, ==, 2);
-
-		/* Send Echo Command */
-		rmp->rx_interrupt_count = 0;
-		reinit_completion(&rmp->cb_completion);
-		SMP2P_SET_RMT_CMD_TYPE(test_request, 1);
-		SMP2P_SET_RMT_CMD(test_request, SMP2P_LB_CMD_ECHO);
-		SMP2P_SET_RMT_DATA(test_request, 10);
-		rmp->remote_item.entries[0].entry = test_request;
-		rmp->tx_interrupt();
-		UT_ASSERT_INT(
-			(int)wait_for_completion_timeout(
-					&rmp->cb_completion, HZ / 2),
-			>, 0);
-
-		/* Verify Echo Response */
-		UT_ASSERT_INT(rmp->rx_interrupt_count, ==, 1);
-		ret = msm_smp2p_out_read(local,
-							&test_response);
-		UT_ASSERT_INT(ret, ==, 0);
-		test_response = SMP2P_GET_RMT_DATA(test_response);
-		UT_ASSERT_INT(test_response, ==, 10);
-
-		/* Send PINGPONG command */
-		test_request = 0;
-		test_response = 0;
-		rmp->rx_interrupt_count = 0;
-		reinit_completion(&rmp->cb_completion);
-		SMP2P_SET_RMT_CMD_TYPE(test_request, 1);
-		SMP2P_SET_RMT_CMD(test_request, SMP2P_LB_CMD_PINGPONG);
-		SMP2P_SET_RMT_DATA(test_request, 10);
-		rmp->remote_item.entries[0].entry = test_request;
-		rmp->tx_interrupt();
-		UT_ASSERT_INT(
-			(int)wait_for_completion_timeout(
-					&rmp->cb_completion, HZ / 2),
-			>, 0);
-
-		/* Verify PINGPONG Response */
-		UT_ASSERT_INT(rmp->rx_interrupt_count, ==, 1);
-		ret = msm_smp2p_out_read(local, &test_response);
-		UT_ASSERT_INT(ret, ==, 0);
-		test_response = SMP2P_GET_RMT_DATA(test_response);
-		UT_ASSERT_INT(test_response, ==, 9);
-
-		/* Send CLEARALL command */
-		test_request = 0;
-		test_response = 0;
-		rmp->rx_interrupt_count = 0;
-		reinit_completion(&rmp->cb_completion);
-		SMP2P_SET_RMT_CMD_TYPE(test_request, 1);
-		SMP2P_SET_RMT_CMD(test_request, SMP2P_LB_CMD_CLEARALL);
-		SMP2P_SET_RMT_DATA(test_request, 10);
-		rmp->remote_item.entries[0].entry = test_request;
-		rmp->tx_interrupt();
-		UT_ASSERT_INT(
-			(int)wait_for_completion_timeout(
-					&rmp->cb_completion, HZ / 2),
-			>, 0);
-
-		/* Verify CLEARALL response */
-		UT_ASSERT_INT(rmp->rx_interrupt_count, ==, 1);
-		ret = msm_smp2p_out_read(local, &test_response);
-		UT_ASSERT_INT(ret, ==, 0);
-		test_response = SMP2P_GET_RMT_DATA(test_response);
-		UT_ASSERT_INT(test_response, ==, 0);
-
-		ret = msm_smp2p_deinit_rmt_lpb_proc(SMP2P_REMOTE_MOCK_PROC);
-		UT_ASSERT_INT(ret, ==, 0);
-		seq_puts(s, "\tOK\n");
-	} while (0);
-
-	if (failed) {
-		pr_err("%s: Failed\n", __func__);
-		seq_puts(s, "\tFailed\n");
-		msm_smp2p_deinit_rmt_lpb_proc(SMP2P_REMOTE_MOCK_PROC);
-	}
-}
-
-/**
- * smp2p_ut_remote_inout_core - Verify inbound/outbound functionality.
- *
- * @s: pointer to output file
- * @remote_pid:  Remote processor to test
- *
- * This test verifies inbound/outbound functionality for the remote processor.
- */
-static void smp2p_ut_remote_inout_core(struct seq_file *s, int remote_pid)
-{
-	int failed = 0;
-	struct msm_smp2p_out *handle;
-	int ret;
-	uint32_t test_request;
-	uint32_t test_response = 0;
-	static struct mock_cb_data cb_out;
-	static struct mock_cb_data cb_in;
-
-	seq_printf(s, "Running %s for '%s' remote pid %d\n",
-		   __func__, smp2p_pid_to_name(remote_pid), remote_pid);
-	mock_cb_data_init(&cb_out);
-	mock_cb_data_init(&cb_in);
-	do {
-		/* Open output entry */
-		ret = msm_smp2p_out_open(remote_pid, "smp2p",
-			&cb_out.nb, &handle);
-		UT_ASSERT_INT(ret, ==, 0);
-		UT_ASSERT_INT(
-			(int)wait_for_completion_timeout(
-					&cb_out.cb_completion, HZ / 2),
-			>, 0);
-		UT_ASSERT_INT(cb_out.cb_count, ==, 1);
-		UT_ASSERT_INT(cb_out.event_open, ==, 1);
-
-		/* Open inbound entry */
-		ret = msm_smp2p_in_register(remote_pid, "smp2p",
-				&cb_in.nb);
-		UT_ASSERT_INT(ret, ==, 0);
-		UT_ASSERT_INT(
-			(int)wait_for_completion_timeout(
-					&cb_in.cb_completion, HZ / 2),
-			>, 0);
-		UT_ASSERT_INT(cb_in.cb_count, ==, 1);
-		UT_ASSERT_INT(cb_in.event_open, ==, 1);
-
-		/* Write an echo request */
-		mock_cb_data_reset(&cb_out);
-		mock_cb_data_reset(&cb_in);
-		test_request = 0x0;
-		SMP2P_SET_RMT_CMD_TYPE(test_request, 1);
-		SMP2P_SET_RMT_CMD(test_request, SMP2P_LB_CMD_ECHO);
-		SMP2P_SET_RMT_DATA(test_request, 0xAA55);
-		ret = msm_smp2p_out_write(handle, test_request);
-		UT_ASSERT_INT(ret, ==, 0);
-
-		/* Verify inbound reply */
-		UT_ASSERT_INT(
-			(int)wait_for_completion_timeout(
-					&cb_in.cb_completion, HZ / 2),
-			>, 0);
-		UT_ASSERT_INT(cb_in.cb_count, ==, 1);
-		UT_ASSERT_INT(cb_in.event_entry_update, ==, 1);
-		UT_ASSERT_INT(SMP2P_GET_RMT_DATA(
-			    cb_in.entry_data.current_value), ==, 0xAA55);
-
-		ret = msm_smp2p_in_read(remote_pid, "smp2p", &test_response);
-		UT_ASSERT_INT(ret, ==, 0);
-		UT_ASSERT_INT(0, ==, SMP2P_GET_RMT_CMD_TYPE(test_response));
-		UT_ASSERT_INT(SMP2P_LB_CMD_ECHO, ==,
-				SMP2P_GET_RMT_CMD(test_response));
-		UT_ASSERT_INT(0xAA55, ==, SMP2P_GET_RMT_DATA(test_response));
-
-		/* Write a clear all request */
-		mock_cb_data_reset(&cb_in);
-		test_request = 0x0;
-		SMP2P_SET_RMT_CMD_TYPE(test_request, 1);
-		SMP2P_SET_RMT_CMD(test_request, SMP2P_LB_CMD_CLEARALL);
-		SMP2P_SET_RMT_DATA(test_request, 0xAA55);
-		ret = msm_smp2p_out_write(handle, test_request);
-		UT_ASSERT_INT(ret, ==, 0);
-
-		/* Verify inbound reply */
-		UT_ASSERT_INT(
-			(int)wait_for_completion_timeout(
-					&cb_in.cb_completion, HZ / 2),
-			>, 0);
-		UT_ASSERT_INT(cb_in.cb_count, ==, 1);
-		UT_ASSERT_INT(cb_in.event_entry_update, ==, 1);
-		UT_ASSERT_INT(SMP2P_GET_RMT_DATA(
-			    cb_in.entry_data.current_value), ==, 0x0000);
-
-		ret = msm_smp2p_in_read(remote_pid, "smp2p", &test_response);
-		UT_ASSERT_INT(ret, ==, 0);
-		UT_ASSERT_INT(0, ==, SMP2P_GET_RMT_CMD_TYPE(test_response));
-		UT_ASSERT_INT(0x0000, ==, SMP2P_GET_RMT_DATA(test_response));
-
-		/* Write a decrement request */
-		mock_cb_data_reset(&cb_in);
-		test_request = 0x0;
-		SMP2P_SET_RMT_CMD_TYPE(test_request, 1);
-		SMP2P_SET_RMT_CMD(test_request, SMP2P_LB_CMD_PINGPONG);
-		SMP2P_SET_RMT_DATA(test_request, 0xAA55);
-		ret = msm_smp2p_out_write(handle, test_request);
-		UT_ASSERT_INT(ret, ==, 0);
-
-		/* Verify inbound reply */
-		UT_ASSERT_INT(
-			(int)wait_for_completion_timeout(
-					&cb_in.cb_completion, HZ / 2),
-			>, 0);
-		UT_ASSERT_INT(cb_in.cb_count, ==, 1);
-		UT_ASSERT_INT(cb_in.event_entry_update, ==, 1);
-		UT_ASSERT_INT(SMP2P_GET_RMT_DATA(
-			    cb_in.entry_data.current_value), ==, 0xAA54);
-
-		ret = msm_smp2p_in_read(remote_pid, "smp2p", &test_response);
-		UT_ASSERT_INT(ret, ==, 0);
-		UT_ASSERT_INT(0, ==, SMP2P_GET_RMT_CMD_TYPE(test_response));
-		UT_ASSERT_INT(SMP2P_LB_CMD_PINGPONG, ==,
-				SMP2P_GET_RMT_CMD(test_response));
-		UT_ASSERT_INT(0xAA54, ==, SMP2P_GET_RMT_DATA(test_response));
-
-		/* Test the ignore flag */
-		mock_cb_data_reset(&cb_in);
-		test_request = 0x0;
-		SMP2P_SET_RMT_CMD_TYPE(test_request, 1);
-		SMP2P_SET_RMT_CMD(test_request, SMP2P_RLPB_IGNORE);
-		SMP2P_SET_RMT_DATA(test_request, 0xAA55);
-		ret = msm_smp2p_out_write(handle, test_request);
-		UT_ASSERT_INT(ret, ==, 0);
-
-		UT_ASSERT_INT(
-			(int)wait_for_completion_timeout(
-					&cb_in.cb_completion, HZ / 2),
-			==, 0);
-		UT_ASSERT_INT(cb_in.cb_count, ==, 0);
-		UT_ASSERT_INT(cb_in.event_entry_update, ==, 0);
-		ret = msm_smp2p_in_read(remote_pid, "smp2p", &test_response);
-		UT_ASSERT_INT(ret, ==, 0);
-		UT_ASSERT_INT(0xAA54, ==, SMP2P_GET_RMT_DATA(test_response));
-
-		/* Cleanup */
-		ret = msm_smp2p_out_close(&handle);
-		UT_ASSERT_INT(ret, ==, 0);
-		UT_ASSERT_PTR(handle, ==, 0);
-		ret = msm_smp2p_in_unregister(remote_pid, "smp2p", &cb_in.nb);
-		UT_ASSERT_INT(ret, ==, 0);
-
-		seq_puts(s, "\tOK\n");
-	} while (0);
-
-	if (failed) {
-		if (handle)
-			(void)msm_smp2p_out_close(&handle);
-		(void)msm_smp2p_in_unregister(remote_pid, "smp2p", &cb_in.nb);
-
-		pr_err("%s: Failed\n", __func__);
-		seq_puts(s, "\tFailed\n");
-	}
-}
-
-/**
- * smp2p_ut_remote_inout - Verify inbound/outbound functionality for all.
- *
- * @s: pointer to output file
- *
- * This test verifies inbound and outbound functionality for all
- * configured remote processor.
- */
-static void smp2p_ut_remote_inout(struct seq_file *s)
-{
-	struct smp2p_interrupt_config *int_cfg;
-	int pid;
-
-	int_cfg = smp2p_get_interrupt_config();
-	if (!int_cfg) {
-		seq_puts(s, "Remote processor config unavailable\n");
-		return;
-	}
-
-	for (pid = 0; pid < SMP2P_NUM_PROCS; ++pid) {
-		if (!int_cfg[pid].is_configured)
-			continue;
-
-		msm_smp2p_deinit_rmt_lpb_proc(pid);
-		smp2p_ut_remote_inout_core(s, pid);
-		msm_smp2p_init_rmt_lpb_proc(pid);
-	}
-}
-
-/**
- * smp2p_ut_remote_out_max_entries_core - Verify open functionality.
- *
- * @s: pointer to output file
- * @remote_pid:  Remote processor for which the test is executed.
- *
- * This test verifies open functionality by creating maximum outbound entries.
- */
-static void smp2p_ut_remote_out_max_entries_core(struct seq_file *s,
-	int remote_pid)
-{
-	int j = 0;
-	int failed = 0;
-	struct msm_smp2p_out *handle[SMP2P_MAX_ENTRY];
-	int ret;
-	static struct mock_cb_data cb_out[SMP2P_MAX_ENTRY];
-	char entry_name[SMP2P_MAX_ENTRY_NAME];
-	int num_created;
-
-	seq_printf(s, "Running %s for '%s' remote pid %d\n",
-		   __func__, smp2p_pid_to_name(remote_pid), remote_pid);
-
-	for (j = 0; j < SMP2P_MAX_ENTRY; j++) {
-		handle[j] = NULL;
-		mock_cb_data_init(&cb_out[j]);
-	}
-
-	do {
-		num_created = 0;
-		for (j = 0; j < SMP2P_MAX_ENTRY; j++) {
-			/* Open as many output entries as possible */
-			scnprintf((char *)entry_name, SMP2P_MAX_ENTRY_NAME,
-				"smp2p%d", j);
-			ret = msm_smp2p_out_open(remote_pid, entry_name,
-				&cb_out[j].nb, &handle[j]);
-			if (ret == -ENOMEM)
-				/* hit max number */
-				break;
-			UT_ASSERT_INT(ret, ==, 0);
-			++num_created;
-		}
-		if (failed)
-			break;
-
-		/* verify we created more than 1 entry */
-		UT_ASSERT_INT(num_created, <=, SMP2P_MAX_ENTRY);
-		UT_ASSERT_INT(num_created, >, 0);
-
-		seq_puts(s, "\tOK\n");
-	} while (0);
-
-	if (failed) {
-		pr_err("%s: Failed\n", __func__);
-		seq_puts(s, "\tFailed\n");
-	}
-
-	/* cleanup */
-	for (j = 0; j < SMP2P_MAX_ENTRY; j++)
-		ret = msm_smp2p_out_close(&handle[j]);
-}
-
-/**
- * smp2p_ut_remote_out_max_entries - Verify open for all configured processors.
- *
- * @s: pointer to output file
- *
- * This test verifies creating max number of entries for
- * all configured remote processor.
- */
-static void smp2p_ut_remote_out_max_entries(struct seq_file *s)
-{
-	struct smp2p_interrupt_config *int_cfg;
-	int pid;
-
-	int_cfg = smp2p_get_interrupt_config();
-	if (!int_cfg) {
-		seq_puts(s, "Remote processor config unavailable\n");
-		return;
-	}
-
-	for (pid = 0; pid < SMP2P_NUM_PROCS; ++pid) {
-		if (!int_cfg[pid].is_configured)
-			continue;
-
-		smp2p_ut_remote_out_max_entries_core(s, pid);
-	}
-}
-
-/**
- * smp2p_ut_local_in_max_entries - Verify registering and unregistering.
- *
- * @s: pointer to output file
- *
- * This test verifies registering and unregistering for inbound entries using
- * the remote mock processor.
- */
-static void smp2p_ut_local_in_max_entries(struct seq_file *s)
-{
-	int j = 0;
-	int failed = 0;
-	struct msm_smp2p_remote_mock *rmp = NULL;
-	int ret;
-	static struct mock_cb_data cb_in[SMP2P_MAX_ENTRY];
-	static struct mock_cb_data cb_out;
-
-	seq_printf(s, "Running %s\n", __func__);
-
-	for (j = 0; j < SMP2P_MAX_ENTRY; j++)
-		mock_cb_data_init(&cb_in[j]);
-
-	mock_cb_data_init(&cb_out);
-
-	do {
-		/* Initialize mock edge */
-		ret = smp2p_reset_mock_edge();
-		UT_ASSERT_INT(ret, ==, 0);
-
-		rmp = msm_smp2p_get_remote_mock();
-		UT_ASSERT_PTR(rmp, !=, NULL);
-
-		rmp->rx_interrupt_count = 0;
-		memset(&rmp->remote_item, 0,
-			sizeof(struct smp2p_smem_item));
-		rmp->remote_item.header.magic = SMP2P_MAGIC;
-		SMP2P_SET_LOCAL_PID(
-		rmp->remote_item.header.rem_loc_proc_id,
-						SMP2P_REMOTE_MOCK_PROC);
-		SMP2P_SET_REMOTE_PID(
-		rmp->remote_item.header.rem_loc_proc_id,
-						SMP2P_APPS_PROC);
-		SMP2P_SET_VERSION(
-		rmp->remote_item.header.feature_version, 1);
-		SMP2P_SET_FEATURES(
-		rmp->remote_item.header.feature_version, 0);
-		SMP2P_SET_ENT_TOTAL(
-		rmp->remote_item.header.valid_total_ent, SMP2P_MAX_ENTRY);
-		SMP2P_SET_ENT_VALID(
-		rmp->remote_item.header.valid_total_ent, 0);
-		rmp->remote_item.header.flags = 0x0;
-		msm_smp2p_set_remote_mock_exists(true);
-
-		/* Create Max Entries in the remote mock object */
-		for (j = 0; j < SMP2P_MAX_ENTRY; j++) {
-			scnprintf(rmp->remote_item.entries[j].name,
-				SMP2P_MAX_ENTRY_NAME, "smp2p%d", j);
-			rmp->remote_item.entries[j].entry = 0;
-			rmp->tx_interrupt();
-		}
-
-		/* Register for in entries */
-		for (j = 0; j < SMP2P_MAX_ENTRY; j++) {
-			ret = msm_smp2p_in_register(SMP2P_REMOTE_MOCK_PROC,
-				rmp->remote_item.entries[j].name,
-				&(cb_in[j].nb));
-			UT_ASSERT_INT(ret, ==, 0);
-			UT_ASSERT_INT(
-				(int)wait_for_completion_timeout(
-					&(cb_in[j].cb_completion), HZ / 2),
-				>, 0);
-			UT_ASSERT_INT(cb_in[j].cb_count, ==, 1);
-			UT_ASSERT_INT(cb_in[j].event_entry_update, ==, 0);
-		}
-		UT_ASSERT_INT(j, ==, SMP2P_MAX_ENTRY);
-
-		/* Unregister */
-		for (j = 0; j < SMP2P_MAX_ENTRY; j++) {
-			ret = msm_smp2p_in_unregister(SMP2P_REMOTE_MOCK_PROC,
-				rmp->remote_item.entries[j].name,
-				&(cb_in[j].nb));
-		    UT_ASSERT_INT(ret, ==, 0);
-		}
-		UT_ASSERT_INT(j, ==, SMP2P_MAX_ENTRY);
-
-		seq_puts(s, "\tOK\n");
-	} while (0);
-
-	if (failed) {
-		pr_err("%s: Failed\n", __func__);
-		seq_puts(s, "\tFailed\n");
-
-		for (j = 0; j < SMP2P_MAX_ENTRY; j++)
-			ret = msm_smp2p_in_unregister(SMP2P_REMOTE_MOCK_PROC,
-				rmp->remote_item.entries[j].name,
-				&(cb_in[j].nb));
-	}
-}
-
-/**
- * smp2p_ut_local_in_multiple - Verify Multiple Inbound Registration.
- *
- * @s: pointer to output file
- *
- * This test verifies multiple clients registering for same inbound entries
- * using the remote mock processor.
- */
-static void smp2p_ut_local_in_multiple(struct seq_file *s)
-{
-	int failed = 0;
-	struct msm_smp2p_remote_mock *rmp = NULL;
-	int ret;
-	static struct mock_cb_data cb_in_1;
-	static struct mock_cb_data cb_in_2;
-	static struct mock_cb_data cb_out;
-
-	seq_printf(s, "Running %s\n", __func__);
-
-	mock_cb_data_init(&cb_in_1);
-	mock_cb_data_init(&cb_in_2);
-	mock_cb_data_init(&cb_out);
-
-	do {
-		/* Initialize mock edge */
-		ret = smp2p_reset_mock_edge();
-		UT_ASSERT_INT(ret, ==, 0);
-
-		rmp = msm_smp2p_get_remote_mock();
-		UT_ASSERT_PTR(rmp, !=, NULL);
-
-		rmp->rx_interrupt_count = 0;
-		memset(&rmp->remote_item, 0,
-			sizeof(struct smp2p_smem_item));
-		rmp->remote_item.header.magic = SMP2P_MAGIC;
-		SMP2P_SET_LOCAL_PID(
-		rmp->remote_item.header.rem_loc_proc_id,
-						SMP2P_REMOTE_MOCK_PROC);
-		SMP2P_SET_REMOTE_PID(
-		rmp->remote_item.header.rem_loc_proc_id,
-						SMP2P_APPS_PROC);
-		SMP2P_SET_VERSION(
-		rmp->remote_item.header.feature_version, 1);
-		SMP2P_SET_FEATURES(
-		rmp->remote_item.header.feature_version, 0);
-		SMP2P_SET_ENT_TOTAL(
-		rmp->remote_item.header.valid_total_ent, 1);
-		SMP2P_SET_ENT_VALID(
-		rmp->remote_item.header.valid_total_ent, 0);
-		rmp->remote_item.header.flags = 0x0;
-		msm_smp2p_set_remote_mock_exists(true);
-
-		/* Create an Entry in the remote mock object */
-		scnprintf(rmp->remote_item.entries[0].name,
-				SMP2P_MAX_ENTRY_NAME, "smp2p%d", 1);
-		rmp->remote_item.entries[0].entry = 0;
-		rmp->tx_interrupt();
-
-		/* Register multiple clients for the inbound entry */
-		ret = msm_smp2p_in_register(SMP2P_REMOTE_MOCK_PROC,
-				rmp->remote_item.entries[0].name,
-				&cb_in_1.nb);
-		UT_ASSERT_INT(ret, ==, 0);
-		UT_ASSERT_INT(
-				(int)wait_for_completion_timeout(
-				&(cb_in_1.cb_completion), HZ / 2),
-				>, 0);
-		UT_ASSERT_INT(cb_in_1.cb_count, ==, 1);
-		UT_ASSERT_INT(cb_in_1.event_entry_update, ==, 0);
-
-		ret = msm_smp2p_in_register(SMP2P_REMOTE_MOCK_PROC,
-				rmp->remote_item.entries[0].name,
-				&cb_in_2.nb);
-		UT_ASSERT_INT(ret, ==, 0);
-		UT_ASSERT_INT(
-				(int)wait_for_completion_timeout(
-				&(cb_in_2.cb_completion), HZ / 2),
-				>, 0);
-		UT_ASSERT_INT(cb_in_2.cb_count, ==, 1);
-		UT_ASSERT_INT(cb_in_2.event_entry_update, ==, 0);
-
-
-		/* Unregister the clients */
-		ret = msm_smp2p_in_unregister(SMP2P_REMOTE_MOCK_PROC,
-				rmp->remote_item.entries[0].name,
-				&(cb_in_1.nb));
-		UT_ASSERT_INT(ret, ==, 0);
-
-		ret = msm_smp2p_in_unregister(SMP2P_REMOTE_MOCK_PROC,
-				rmp->remote_item.entries[0].name,
-				&(cb_in_2.nb));
-		UT_ASSERT_INT(ret, ==, 0);
-
-		seq_puts(s, "\tOK\n");
-	} while (0);
-
-	if (failed) {
-		pr_err("%s: Failed\n", __func__);
-		seq_puts(s, "\tFailed\n");
-
-		ret = msm_smp2p_in_unregister(SMP2P_REMOTE_MOCK_PROC,
-				rmp->remote_item.entries[0].name,
-				&(cb_in_1.nb));
-
-		ret = msm_smp2p_in_unregister(SMP2P_REMOTE_MOCK_PROC,
-				rmp->remote_item.entries[0].name,
-				&(cb_in_2.nb));
-	}
-}
-
-/**
- * smp2p_ut_local_ssr_ack - Verify SSR Done/ACK Feature
- *
- * @s: pointer to output file
- */
-static void smp2p_ut_local_ssr_ack(struct seq_file *s)
-{
-	int failed = 0;
-	struct msm_smp2p_remote_mock *rmp = NULL;
-	int ret;
-
-	seq_printf(s, "Running %s\n", __func__);
-	do {
-		struct smp2p_smem *rhdr;
-		struct smp2p_smem *lhdr;
-		int negotiation_state;
-
-		/* initialize v1 without SMP2P_FEATURE_SSR_ACK enabled */
-		ret = smp2p_reset_mock_edge();
-		UT_ASSERT_INT(ret, ==, 0);
-		rmp = msm_smp2p_get_remote_mock();
-		UT_ASSERT_PTR(rmp, !=, NULL);
-		rhdr = &rmp->remote_item.header;
-
-		rmp->rx_interrupt_count = 0;
-		memset(&rmp->remote_item, 0, sizeof(struct smp2p_smem_item));
-		rhdr->magic = SMP2P_MAGIC;
-		SMP2P_SET_LOCAL_PID(rhdr->rem_loc_proc_id,
-				SMP2P_REMOTE_MOCK_PROC);
-		SMP2P_SET_REMOTE_PID(rhdr->rem_loc_proc_id, SMP2P_APPS_PROC);
-		SMP2P_SET_VERSION(rhdr->feature_version, 1);
-		SMP2P_SET_FEATURES(rhdr->feature_version, 0);
-		SMP2P_SET_ENT_TOTAL(rhdr->valid_total_ent, SMP2P_MAX_ENTRY);
-		SMP2P_SET_ENT_VALID(rhdr->valid_total_ent, 0);
-		rhdr->flags = 0x0;
-		msm_smp2p_set_remote_mock_exists(true);
-		rmp->tx_interrupt();
-
-		/* verify edge is open */
-		lhdr = smp2p_get_out_item(SMP2P_REMOTE_MOCK_PROC,
-					&negotiation_state);
-		UT_ASSERT_PTR(NULL, !=, lhdr);
-		UT_ASSERT_INT(negotiation_state, ==, SMP2P_EDGE_STATE_OPENED);
-		UT_ASSERT_INT(rmp->rx_interrupt_count, ==, 1);
-
-		/* verify no response to ack feature */
-		rmp->rx_interrupt_count = 0;
-		SMP2P_SET_RESTART_DONE(rhdr->flags, 1);
-		rmp->tx_interrupt();
-		UT_ASSERT_INT(0, ==, SMP2P_GET_RESTART_DONE(lhdr->flags));
-		UT_ASSERT_INT(0, ==, SMP2P_GET_RESTART_ACK(lhdr->flags));
-		UT_ASSERT_INT(rmp->rx_interrupt_count, ==, 0);
-
-		/* initialize v1 with SMP2P_FEATURE_SSR_ACK enabled */
-		ret = smp2p_reset_mock_edge();
-		UT_ASSERT_INT(ret, ==, 0);
-		rmp = msm_smp2p_get_remote_mock();
-		UT_ASSERT_PTR(rmp, !=, NULL);
-		rhdr = &rmp->remote_item.header;
-
-		rmp->rx_interrupt_count = 0;
-		memset(&rmp->remote_item, 0, sizeof(struct smp2p_smem_item));
-		rhdr->magic = SMP2P_MAGIC;
-		SMP2P_SET_LOCAL_PID(rhdr->rem_loc_proc_id,
-				SMP2P_REMOTE_MOCK_PROC);
-		SMP2P_SET_REMOTE_PID(rhdr->rem_loc_proc_id, SMP2P_APPS_PROC);
-		SMP2P_SET_VERSION(rhdr->feature_version, 1);
-		SMP2P_SET_FEATURES(rhdr->feature_version,
-				SMP2P_FEATURE_SSR_ACK);
-		SMP2P_SET_ENT_TOTAL(rhdr->valid_total_ent, SMP2P_MAX_ENTRY);
-		SMP2P_SET_ENT_VALID(rhdr->valid_total_ent, 0);
-		rmp->rx_interrupt_count = 0;
-		rhdr->flags = 0x0;
-		msm_smp2p_set_remote_mock_exists(true);
-		rmp->tx_interrupt();
-
-		/* verify edge is open */
-		lhdr = smp2p_get_out_item(SMP2P_REMOTE_MOCK_PROC,
-					&negotiation_state);
-		UT_ASSERT_PTR(NULL, !=, lhdr);
-		UT_ASSERT_INT(negotiation_state, ==, SMP2P_EDGE_STATE_OPENED);
-		UT_ASSERT_INT(rmp->rx_interrupt_count, ==, 1);
-
-		/* verify response to ack feature */
-		rmp->rx_interrupt_count = 0;
-		SMP2P_SET_RESTART_DONE(rhdr->flags, 1);
-		rmp->tx_interrupt();
-		UT_ASSERT_INT(0, ==, SMP2P_GET_RESTART_DONE(lhdr->flags));
-		UT_ASSERT_INT(1, ==, SMP2P_GET_RESTART_ACK(lhdr->flags));
-		UT_ASSERT_INT(rmp->rx_interrupt_count, ==, 1);
-
-		rmp->rx_interrupt_count = 0;
-		SMP2P_SET_RESTART_DONE(rhdr->flags, 0);
-		rmp->tx_interrupt();
-		UT_ASSERT_INT(0, ==, SMP2P_GET_RESTART_DONE(lhdr->flags));
-		UT_ASSERT_INT(0, ==, SMP2P_GET_RESTART_ACK(lhdr->flags));
-		UT_ASSERT_INT(rmp->rx_interrupt_count, ==, 1);
-
-		seq_puts(s, "\tOK\n");
-	} while (0);
-
-	if (failed) {
-		pr_err("%s: Failed\n", __func__);
-		seq_puts(s, "\tFailed\n");
-	}
-}
-
-/**
- * get_ssr_name_for_proc - Retrieve an SSR name from the provided list
- *
- * @names:	List of possible processor names
- * @name_len:	The length of @names
- * @index:	Index into @names
- *
- * Return: Pointer to the next processor name, NULL in error conditions
- */
-static char *get_ssr_name_for_proc(char * const names[], size_t name_len,
-				   int index)
-{
-	if (index >= name_len) {
-		pr_err("%s: SSR failed; check subsys name table\n",
-				__func__);
-		return NULL;
-	}
-
-	return names[index];
-}
-
-/**
- * smp2p_ut_local_ssr_ack - Verify SSR Done/ACK Feature
- *
- * @s: pointer to output file
- * @rpid: Remote processor ID
- * @int_cfg: Interrupt config
- */
-static void smp2p_ut_remotesubsys_ssr_ack(struct seq_file *s, uint32_t rpid,
-		struct smp2p_interrupt_config *int_cfg)
-{
-	int failed = 0;
-
-	seq_printf(s, "Running %s\n", __func__);
-	do {
-		struct smp2p_smem *rhdr;
-		struct smp2p_smem *lhdr;
-		int negotiation_state;
-		int name_index;
-		int ret;
-		uint32_t ssr_done_start;
-		bool ssr_ack_enabled = false;
-		bool ssr_success = false;
-		char *name = NULL;
-
-		static char * const mpss_names[] = {"modem", "mpss"};
-		static char * const lpass_names[] = {"adsp", "lpass"};
-		static char * const sensor_names[] = {"slpi", "dsps"};
-		static char * const wcnss_names[] = {"wcnss"};
-
-		lhdr = smp2p_get_out_item(rpid, &negotiation_state);
-		UT_ASSERT_PTR(NULL, !=, lhdr);
-		UT_ASSERT_INT(SMP2P_EDGE_STATE_OPENED, ==, negotiation_state);
-
-		rhdr = smp2p_get_in_item(rpid);
-		UT_ASSERT_PTR(NULL, !=, rhdr);
-
-		/* get initial state of SSR flags */
-		if (SMP2P_GET_FEATURES(rhdr->feature_version)
-				& SMP2P_FEATURE_SSR_ACK)
-			ssr_ack_enabled = true;
-		else
-			ssr_ack_enabled = false;
-
-		ssr_done_start = SMP2P_GET_RESTART_DONE(rhdr->flags);
-		UT_ASSERT_INT(ssr_done_start, ==,
-				SMP2P_GET_RESTART_ACK(lhdr->flags));
-
-		/* trigger restart */
-		name_index = 0;
-		while (!ssr_success) {
-
-			switch (rpid) {
-			case SMP2P_MODEM_PROC:
-				name = get_ssr_name_for_proc(mpss_names,
-						ARRAY_SIZE(mpss_names),
-						name_index);
-				break;
-			case SMP2P_AUDIO_PROC:
-				name = get_ssr_name_for_proc(lpass_names,
-						ARRAY_SIZE(lpass_names),
-						name_index);
-				break;
-			case SMP2P_SENSOR_PROC:
-				name = get_ssr_name_for_proc(sensor_names,
-						ARRAY_SIZE(sensor_names),
-						name_index);
-				break;
-			case SMP2P_WIRELESS_PROC:
-				name = get_ssr_name_for_proc(wcnss_names,
-						ARRAY_SIZE(wcnss_names),
-						name_index);
-				break;
-			default:
-				pr_err("%s: Invalid proc ID %d given for ssr\n",
-						__func__, rpid);
-			}
-
-			if (!name) {
-				seq_puts(s, "\tSSR failed; check subsys name table\n");
-				failed = true;
-				break;
-			}
-
-			seq_printf(s, "Restarting '%s'\n", name);
-			ret = subsystem_restart(name);
-			if (ret == -ENODEV) {
-				seq_puts(s, "\tSSR call failed\n");
-				++name_index;
-				continue;
-			}
-			ssr_success = true;
-		}
-		if (failed)
-			break;
-
-		msleep(10*1000);
-
-		/* verify ack signaling */
-		if (ssr_ack_enabled) {
-			ssr_done_start ^= 1;
-			UT_ASSERT_INT(ssr_done_start, ==,
-					SMP2P_GET_RESTART_ACK(lhdr->flags));
-			UT_ASSERT_INT(ssr_done_start, ==,
-					SMP2P_GET_RESTART_DONE(rhdr->flags));
-			UT_ASSERT_INT(0, ==,
-					SMP2P_GET_RESTART_DONE(lhdr->flags));
-			seq_puts(s, "\tSSR ACK Enabled and Toggled\n");
-		} else {
-			UT_ASSERT_INT(0, ==,
-					SMP2P_GET_RESTART_DONE(lhdr->flags));
-			UT_ASSERT_INT(0, ==,
-					SMP2P_GET_RESTART_ACK(lhdr->flags));
-
-			UT_ASSERT_INT(0, ==,
-					SMP2P_GET_RESTART_DONE(rhdr->flags));
-			UT_ASSERT_INT(0, ==,
-					SMP2P_GET_RESTART_ACK(rhdr->flags));
-			seq_puts(s, "\tSSR ACK Disabled\n");
-		}
-
-		seq_puts(s, "\tOK\n");
-	} while (0);
-
-	if (failed) {
-		pr_err("%s: Failed\n", __func__);
-		seq_puts(s, "\tFailed\n");
-	}
-}
-
-/**
- * smp2p_ut_remote_ssr_ack - Verify SSR Done/ACK Feature
- *
- * @s: pointer to output file
- *
- * Triggers SSR for each subsystem.
- */
-static void smp2p_ut_remote_ssr_ack(struct seq_file *s)
-{
-	struct smp2p_interrupt_config *int_cfg;
-	int pid;
-
-	int_cfg = smp2p_get_interrupt_config();
-	if (!int_cfg) {
-		seq_puts(s,
-			"Remote processor config unavailable\n");
-		return;
-	}
-
-	for (pid = 0; pid < SMP2P_NUM_PROCS; ++pid) {
-		if (!int_cfg[pid].is_configured)
-			continue;
-
-		msm_smp2p_deinit_rmt_lpb_proc(pid);
-		smp2p_ut_remotesubsys_ssr_ack(s, pid, &int_cfg[pid]);
-		msm_smp2p_init_rmt_lpb_proc(pid);
-	}
-}
-
-static struct dentry *dent;
-
-static int debugfs_show(struct seq_file *s, void *data)
-{
-	void (*show)(struct seq_file *) = s->private;
-
-	show(s);
-
-	return 0;
-}
-
-static int debug_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, debugfs_show, inode->i_private);
-}
-
-static const struct file_operations debug_ops = {
-	.open = debug_open,
-	.release = single_release,
-	.read = seq_read,
-	.llseek = seq_lseek,
-};
-
-void smp2p_debug_create(const char *name,
-			 void (*show)(struct seq_file *))
-{
-	struct dentry *file;
-
-	file = debugfs_create_file(name, 0444, dent, show, &debug_ops);
-	if (!file)
-		pr_err("%s: unable to create file '%s'\n", __func__, name);
-}
-
-void smp2p_debug_create_u32(const char *name, uint32_t *value)
-{
-	struct dentry *file;
-
-	file = debugfs_create_u32(name, 0644, dent, value);
-	if (!file)
-		pr_err("%s: unable to create file '%s'\n", __func__, name);
-}
-
-static int __init smp2p_debugfs_init(void)
-{
-	dent = debugfs_create_dir("smp2p_test", 0);
-	if (IS_ERR(dent))
-		return PTR_ERR(dent);
-
-	/*
-	 * Add Unit Test entries.
-	 *
-	 * The idea with unit tests is that you can run all of them
-	 * from ADB shell by doing:
-	 *  adb shell
-	 *  cat ut*
-	 *
-	 * And if particular tests fail, you can then repeatedly run the
-	 * failing tests as you debug and resolve the failing test.
-	 */
-	smp2p_debug_create("ut_local_basic",
-			smp2p_ut_local_basic);
-	smp2p_debug_create("ut_local_late_open",
-			smp2p_ut_local_late_open);
-	smp2p_debug_create("ut_local_early_open",
-			smp2p_ut_local_early_open);
-	smp2p_debug_create("ut_mock_loopback",
-			smp2p_ut_mock_loopback);
-	smp2p_debug_create("ut_remote_inout",
-			smp2p_ut_remote_inout);
-	smp2p_debug_create("ut_local_in_max_entries",
-		smp2p_ut_local_in_max_entries);
-	smp2p_debug_create("ut_remote_out_max_entries",
-			smp2p_ut_remote_out_max_entries);
-	smp2p_debug_create("ut_local_in_multiple",
-			smp2p_ut_local_in_multiple);
-	smp2p_debug_create("ut_local_ssr_ack",
-			smp2p_ut_local_ssr_ack);
-	smp2p_debug_create("ut_remote_ssr_ack",
-			smp2p_ut_remote_ssr_ack);
-
-	return 0;
-}
-module_init(smp2p_debugfs_init);
diff --git a/drivers/soc/qcom/smp2p_test_common.h b/drivers/soc/qcom/smp2p_test_common.h
deleted file mode 100644
index 0d22fec..0000000
--- a/drivers/soc/qcom/smp2p_test_common.h
+++ /dev/null
@@ -1,214 +0,0 @@
-/* drivers/soc/qcom/smp2p_test_common.h
- *
- * Copyright (c) 2013-2014,2016 The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-#ifndef _SMP2P_TEST_COMMON_H_
-#define _SMP2P_TEST_COMMON_H_
-
-#include <linux/debugfs.h>
-
-/**
- * Unit test assertion for logging test cases.
- *
- * @a lval
- * @b rval
- * @cmp comparison operator
- *
- * Assertion fails if (@a cmp @b) is not true which then
- * logs the function and line number where the error occurred
- * along with the values of @a and @b.
- *
- * Assumes that the following local variables exist:
- * @s - sequential output file pointer
- * @failed - set to true if test fails
- */
-#define UT_ASSERT_INT(a, cmp, b) \
-	{ \
-	int a_tmp = (a); \
-	int b_tmp = (b); \
-	if (!((a_tmp)cmp(b_tmp))) { \
-		seq_printf(s, "%s:%d Fail: " #a "(%d) " #cmp " " #b "(%d)\n", \
-				__func__, __LINE__, \
-				a_tmp, b_tmp); \
-		failed = 1; \
-		break; \
-	} \
-	}
-
-#define UT_ASSERT_PTR(a, cmp, b) \
-	{ \
-	void *a_tmp = (a); \
-	void *b_tmp = (b); \
-	if (!((a_tmp)cmp(b_tmp))) { \
-		seq_printf(s, "%s:%d Fail: " #a "(%pK) " #cmp \
-				" " #b "(%pK)\n", \
-				__func__, __LINE__, \
-				a_tmp, b_tmp); \
-		failed = 1; \
-		break; \
-	} \
-	}
-
-#define UT_ASSERT_UINT(a, cmp, b) \
-	{ \
-	unsigned int a_tmp = (a); \
-	unsigned int b_tmp = (b); \
-	if (!((a_tmp)cmp(b_tmp))) { \
-		seq_printf(s, "%s:%d Fail: " #a "(%u) " #cmp " " #b "(%u)\n", \
-				__func__, __LINE__, \
-				a_tmp, b_tmp); \
-		failed = 1; \
-		break; \
-	} \
-	}
-
-#define UT_ASSERT_HEX(a, cmp, b) \
-	{ \
-	unsigned int a_tmp = (a); \
-	unsigned int b_tmp = (b); \
-	if (!((a_tmp)cmp(b_tmp))) { \
-		seq_printf(s, "%s:%d Fail: " #a "(%x) " #cmp " " #b "(%x)\n", \
-				__func__, __LINE__, \
-				a_tmp, b_tmp); \
-		failed = 1; \
-		break; \
-	} \
-	}
-
-/**
- * In-range unit test assertion for test cases.
- *
- * @a lval
- * @minv Minimum value
- * @maxv Maximum value
- *
- * Assertion fails if @a is not on the exclusive range minv, maxv
- * ((@a < @minv) or (@a > @maxv)).  In the failure case, the macro
- * logs the function and line number where the error occurred along
- * with the values of @a and @minv, @maxv.
- *
- * Assumes that the following local variables exist:
- * @s - sequential output file pointer
- * @failed - set to true if test fails
- */
-#define UT_ASSERT_INT_IN_RANGE(a, minv, maxv) \
-	{ \
-	int a_tmp = (a); \
-	int minv_tmp = (minv); \
-	int maxv_tmp = (maxv); \
-	if (((a_tmp) < (minv_tmp)) || ((a_tmp) > (maxv_tmp))) { \
-		seq_printf(s, "%s:%d Fail: " #a "(%d) < " #minv "(%d) or " \
-				 #a "(%d) > " #maxv "(%d)\n", \
-				__func__, __LINE__, \
-				a_tmp, minv_tmp, a_tmp, maxv_tmp); \
-		failed = 1; \
-		break; \
-	} \
-	}
-
-/* Structure to track state changes for the notifier callback. */
-struct mock_cb_data {
-	bool initialized;
-	spinlock_t lock;
-	struct notifier_block nb;
-
-	/* events */
-	struct completion cb_completion;
-	int cb_count;
-	int event_open;
-	int event_entry_update;
-	struct msm_smp2p_update_notif entry_data;
-};
-
-void smp2p_debug_create(const char *name, void (*show)(struct seq_file *));
-void smp2p_debug_create_u32(const char *name, uint32_t *value);
-static inline int smp2p_test_notify(struct notifier_block *self,
-	unsigned long event, void *data);
-
-/**
- * Reset mock callback data to default values.
- *
- * @cb:  Mock callback data
- */
-static inline void mock_cb_data_reset(struct mock_cb_data *cb)
-{
-	reinit_completion(&cb->cb_completion);
-	cb->cb_count = 0;
-	cb->event_open = 0;
-	cb->event_entry_update = 0;
-	memset(&cb->entry_data, 0,
-		sizeof(struct msm_smp2p_update_notif));
-}
-
-
-/**
- * Initialize mock callback data.
- *
- * @cb:  Mock callback data
- */
-static inline void mock_cb_data_init(struct mock_cb_data *cb)
-{
-	if (!cb->initialized) {
-		init_completion(&cb->cb_completion);
-		spin_lock_init(&cb->lock);
-		cb->initialized = true;
-		cb->nb.notifier_call = smp2p_test_notify;
-		memset(&cb->entry_data, 0,
-			sizeof(struct msm_smp2p_update_notif));
-	}
-	mock_cb_data_reset(cb);
-}
-
-/**
- * Notifier function passed into SMP2P for testing.
- *
- * @self:       Pointer to calling notifier block
- * @event:	    Event
- * @data:       Event-specific data
- * @returns:    0
- */
-static inline int smp2p_test_notify(struct notifier_block *self,
-		unsigned long event, void *data)
-{
-	struct mock_cb_data *cb_data_ptr;
-	unsigned long flags;
-
-	cb_data_ptr = container_of(self, struct mock_cb_data, nb);
-
-	spin_lock_irqsave(&cb_data_ptr->lock, flags);
-
-	switch (event) {
-	case SMP2P_OPEN:
-		++cb_data_ptr->event_open;
-		if (data) {
-			cb_data_ptr->entry_data =
-			*(struct msm_smp2p_update_notif *)(data);
-		}
-		break;
-	case SMP2P_ENTRY_UPDATE:
-		++cb_data_ptr->event_entry_update;
-		if (data) {
-			cb_data_ptr->entry_data =
-			*(struct msm_smp2p_update_notif *)(data);
-		}
-		break;
-	default:
-		pr_err("%s Unknown event\n", __func__);
-		break;
-	}
-
-	++cb_data_ptr->cb_count;
-	complete(&cb_data_ptr->cb_completion);
-	spin_unlock_irqrestore(&cb_data_ptr->lock, flags);
-	return 0;
-}
-#endif /* _SMP2P_TEST_COMMON_H_ */
diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c
index 7d9a2e8..b29e60d 100644
--- a/drivers/spmi/spmi-pmic-arb.c
+++ b/drivers/spmi/spmi-pmic-arb.c
@@ -59,6 +59,8 @@
 #define SPMI_OWNERSHIP_TABLE_REG(N)	(0x0700 + (4 * (N)))
 #define SPMI_OWNERSHIP_PERIPH2OWNER(X)	((X) & 0x7)
 
+#define SPMI_PROTOCOL_IRQ_STATUS	0x6000
+
 /* Channel Status fields */
 enum pmic_arb_chnl_status {
 	PMIC_ARB_STATUS_DONE	= BIT(0),
@@ -148,6 +150,8 @@ struct apid_data {
  * @ver_ops:		version dependent operations.
  * @ppid_to_apid	in-memory copy of PPID -> channel (APID) mapping table.
  *			v2 only.
+ * @ahb_bus_wa:		Use AHB bus workaround to avoid write transaction
+ *			corruption on some PMIC arbiter v5 platforms.
  */
 struct spmi_pmic_arb {
 	void __iomem		*rd_base;
@@ -172,6 +176,7 @@ struct spmi_pmic_arb {
 	u16			*ppid_to_apid;
 	u16			last_apid;
 	struct apid_data	apid_data[PMIC_ARB_MAX_PERIPHS];
+	bool			ahb_bus_wa;
 };
 
 /**
@@ -217,6 +222,16 @@ struct pmic_arb_ver_ops {
 static inline void pmic_arb_base_write(struct spmi_pmic_arb *pa,
 				       u32 offset, u32 val)
 {
+	if (pa->ahb_bus_wa) {
+		/* AHB bus register dummy read for workaround. */
+		readl_relaxed(pa->cnfg + SPMI_PROTOCOL_IRQ_STATUS);
+		/*
+		 * Ensure that the read completes before initiating the
+		 * subsequent register write.
+		 */
+		mb();
+	}
+
 	writel_relaxed(val, pa->wr_base + offset);
 }
 
@@ -1343,6 +1358,9 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev)
 
 	pa->ee = ee;
 
+	pa->ahb_bus_wa = of_property_read_bool(pdev->dev.of_node,
+					"qcom,enable-ahb-bus-workaround");
+
 	pa->mapping_table = devm_kcalloc(&ctrl->dev, PMIC_ARB_MAX_PERIPHS - 1,
 					sizeof(*pa->mapping_table), GFP_KERNEL);
 	if (!pa->mapping_table) {
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 3cacc0d..7663e3c 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -522,6 +522,7 @@ int thermal_zone_get_temp(struct thermal_zone_device *tz, int *temp)
 		if (!ret && *temp < crit_temp)
 			*temp = tz->emul_temperature;
 	}
+	trace_thermal_query_temp(tz, *temp);
 	mutex_unlock(&tz->lock);
 exit:
 	return ret;
@@ -2586,9 +2587,11 @@ static int thermal_pm_notify(struct notifier_block *nb,
 	case PM_POST_SUSPEND:
 		atomic_set(&in_suspend, 0);
 		list_for_each_entry(tz, &thermal_tz_list, node) {
+			mutex_lock(&tz->lock);
 			thermal_zone_device_reset(tz);
-			thermal_zone_device_update(tz,
-						   THERMAL_EVENT_UNSPECIFIED);
+			mod_delayed_work(system_freezable_power_efficient_wq,
+						&tz->poll_queue, 0);
+			mutex_unlock(&tz->lock);
 		}
 		break;
 	default:
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index bac9975..626cfdc 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -1091,6 +1091,18 @@
 	select SERIAL_CORE_CONSOLE
 	select SERIAL_EARLYCON
 
+config SERIAL_MSM_HS
+	tristate "MSM UART High Speed: Serial Driver"
+	depends on ARCH_QCOM
+	select SERIAL_CORE
+	help
+	  If you have a machine based on MSM family of SoCs, you
+	  can enable its onboard high speed serial port by enabling
+	  this option.
+
+	  Choose M here to compile it as a module. The module will be
+	  called msm_serial_hs.
+
 config SERIAL_VT8500
 	bool "VIA VT8500 on-chip serial port support"
 	depends on ARCH_VT8500
diff --git a/drivers/tty/serial/Makefile b/drivers/tty/serial/Makefile
index b39165b..1bdc7f88 100644
--- a/drivers/tty/serial/Makefile
+++ b/drivers/tty/serial/Makefile
@@ -62,6 +62,7 @@
 obj-$(CONFIG_SERIAL_UARTLITE) += uartlite.o
 obj-$(CONFIG_SERIAL_MSM) += msm_serial.o
 obj-$(CONFIG_SERIAL_MSM_GENI) += msm_geni_serial.o
+obj-$(CONFIG_SERIAL_MSM_HS) += msm_serial_hs.o
 obj-$(CONFIG_SERIAL_NETX) += netx-serial.o
 obj-$(CONFIG_SERIAL_KGDB_NMI) += kgdb_nmi.o
 obj-$(CONFIG_SERIAL_KS8695) += serial_ks8695.o
diff --git a/drivers/tty/serial/msm_geni_serial.c b/drivers/tty/serial/msm_geni_serial.c
index 79788eb..af119f4 100644
--- a/drivers/tty/serial/msm_geni_serial.c
+++ b/drivers/tty/serial/msm_geni_serial.c
@@ -1428,9 +1428,7 @@ static void msm_geni_serial_shutdown(struct uart_port *uport)
 	msm_geni_serial_stop_rx(uport);
 	spin_unlock_irqrestore(&uport->lock, flags);
 
-	if (uart_console(uport)) {
-		se_geni_resources_off(&msm_port->serial_rsc);
-	} else {
+	if (!uart_console(uport)) {
 		msm_geni_serial_power_off(uport);
 		if (msm_port->wakeup_irq > 0) {
 			irq_set_irq_wake(msm_port->wakeup_irq, 0);
@@ -2080,6 +2078,21 @@ static void msm_geni_serial_debug_init(struct uart_port *uport)
 		dev_err(uport->dev, "Failed to create dbg dir\n");
 }
 
+static void msm_geni_serial_cons_pm(struct uart_port *uport,
+		unsigned int new_state, unsigned int old_state)
+{
+	struct msm_geni_serial_port *msm_port = GET_DEV_PORT(uport);
+
+	if (unlikely(!uart_console(uport)))
+		return;
+
+	if (new_state == UART_PM_STATE_ON && old_state == UART_PM_STATE_OFF)
+		se_geni_resources_on(&msm_port->serial_rsc);
+	else if (new_state == UART_PM_STATE_OFF &&
+			old_state == UART_PM_STATE_ON)
+		se_geni_resources_off(&msm_port->serial_rsc);
+}
+
 static const struct uart_ops msm_geni_console_pops = {
 	.tx_empty = msm_geni_serial_tx_empty,
 	.stop_tx = msm_geni_serial_stop_tx,
@@ -2096,6 +2109,7 @@ static const struct uart_ops msm_geni_console_pops = {
 	.poll_get_char	= msm_geni_serial_get_char,
 	.poll_put_char	= msm_geni_serial_poll_put_char,
 #endif
+	.pm = msm_geni_serial_cons_pm,
 };
 
 static const struct uart_ops msm_geni_serial_pops = {
@@ -2420,7 +2434,6 @@ static int msm_geni_serial_sys_resume_noirq(struct device *dev)
 
 	if (uart_console(uport) &&
 	    console_suspend_enabled && uport->suspended) {
-		se_geni_resources_on(&port->serial_rsc);
 		uart_resume_port((struct uart_driver *)uport->private_data,
 									uport);
 		disable_irq(uport->irq);
diff --git a/drivers/tty/serial/msm_serial_hs.c b/drivers/tty/serial/msm_serial_hs.c
new file mode 100644
index 0000000..6a05d5b
--- /dev/null
+++ b/drivers/tty/serial/msm_serial_hs.c
@@ -0,0 +1,3808 @@
+/* drivers/serial/msm_serial_hs.c
+ *
+ * MSM 7k High speed uart driver
+ *
+ * Copyright (c) 2008 Google Inc.
+ * Copyright (c) 2007-2017, The Linux Foundation. All rights reserved.
+ * Modified: Nick Pelly <npelly@google.com>
+ *
+ * All source code in this file is licensed under the following license
+ * except where indicated.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * Has optional support for uart power management independent of linux
+ * suspend/resume:
+ *
+ * RX wakeup.
+ * UART wakeup can be triggered by RX activity (using a wakeup GPIO on the
+ * UART RX pin). This should only be used if there is not a wakeup
+ * GPIO on the UART CTS, and the first RX byte is known (for example, with the
+ * Bluetooth Texas Instruments HCILL protocol), since the first RX byte will
+ * always be lost. RTS will be asserted even while the UART is off in this mode
+ * of operation. See msm_serial_hs_platform_data.rx_wakeup_irq.
+ */
+
+#include <linux/module.h>
+
+#include <linux/serial.h>
+#include <linux/serial_core.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/atomic.h>
+#include <linux/kernel.h>
+#include <linux/timer.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/dma-mapping.h>
+#include <linux/tty_flip.h>
+#include <linux/wait.h>
+#include <linux/sysfs.h>
+#include <linux/stat.h>
+#include <linux/device.h>
+#include <linux/wakelock.h>
+#include <linux/debugfs.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/gpio.h>
+#include <linux/ipc_logging.h>
+#include <asm/irq.h>
+#include <linux/kthread.h>
+
+#include <linux/msm-sps.h>
+#include <linux/platform_data/msm_serial_hs.h>
+#include <linux/msm-bus.h>
+
+#include "msm_serial_hs_hwreg.h"
+#define UART_SPS_CONS_PERIPHERAL 0
+#define UART_SPS_PROD_PERIPHERAL 1
+
+#define IPC_MSM_HS_LOG_STATE_PAGES 2
+#define IPC_MSM_HS_LOG_USER_PAGES 2
+#define IPC_MSM_HS_LOG_DATA_PAGES 3
+#define UART_DMA_DESC_NR 8
+#define BUF_DUMP_SIZE 32
+
+/* If the debug_mask gets set to FATAL_LEV,
+ * a fatal error has happened and further IPC logging
+ * is disabled so that this problem can be detected
+ */
+/* IPC log verbosity levels; higher value => more verbose logging */
+enum {
+	FATAL_LEV = 0U, /* fatal error seen; further IPC logging disabled */
+	ERR_LEV = 1U,
+	WARN_LEV = 2U,
+	INFO_LEV = 3U,
+	DBG_LEV = 4U,
+};
+
+/* NOTE: the MSM_HS_* logging macros below expect a local variable named
+ * 'msm_uport' (struct msm_hs_port *) to be in scope at the call site.
+ */
+#define MSM_HS_DBG(x...) do { \
+	if (msm_uport->ipc_debug_mask >= DBG_LEV) { \
+		if (msm_uport->ipc_msm_hs_log_ctxt) \
+			ipc_log_string(msm_uport->ipc_msm_hs_log_ctxt, x); \
+	} \
+} while (0)
+
+#define MSM_HS_INFO(x...) do { \
+	if (msm_uport->ipc_debug_mask >= INFO_LEV) {\
+		if (msm_uport->ipc_msm_hs_log_ctxt) \
+			ipc_log_string(msm_uport->ipc_msm_hs_log_ctxt, x); \
+	} \
+} while (0)
+
+/* warnings and errors show up on console always */
+#define MSM_HS_WARN(x...) do { \
+	pr_warn(x); \
+	if (msm_uport->ipc_msm_hs_log_ctxt && \
+			msm_uport->ipc_debug_mask >= WARN_LEV) \
+		ipc_log_string(msm_uport->ipc_msm_hs_log_ctxt, x); \
+} while (0)
+
+/* ERROR condition in the driver sets the hs_serial_debug_mask
+ * to ERR_FATAL level, so that this message can be seen
+ * in IPC logging. Further errors continue to log on the console
+ */
+#define MSM_HS_ERR(x...) do { \
+	pr_err(x); \
+	if (msm_uport->ipc_msm_hs_log_ctxt && \
+			msm_uport->ipc_debug_mask >= ERR_LEV) { \
+		ipc_log_string(msm_uport->ipc_msm_hs_log_ctxt, x); \
+		msm_uport->ipc_debug_mask = FATAL_LEV; \
+	} \
+} while (0)
+
+/* Log to an explicit IPC context (power/user contexts) when non-NULL */
+#define LOG_USR_MSG(ctx, x...) do { \
+	if (ctx) \
+		ipc_log_string(ctx, x); \
+} while (0)
+
+/*
+ * There are 3 different kind of UART Core available on MSM.
+ * High Speed UART (i.e. Legacy HSUART), GSBI based HSUART
+ * and BSLP based HSUART.
+ */
+enum uart_core_type {
+	LEGACY_HSUART,
+	GSBI_HSUART,
+	BLSP_HSUART,
+};
+
+/* Reason for (and current state of) an RX/TX flush.
+ * Ordering matters: values at or after FLUSH_DATA_INVALID mark the
+ * buffered data as invalid.
+ */
+enum flush_reason {
+	FLUSH_NONE,
+	FLUSH_DATA_READY,
+	FLUSH_DATA_INVALID,  /* values after this indicate invalid data */
+	FLUSH_IGNORE,
+	FLUSH_STOP,
+	FLUSH_SHUTDOWN,
+};
+
+/*
+ * SPS data structures to support HSUART with BAM
+ * @sps_pipe - This struct defines BAM pipe descriptor
+ * @sps_connect - This struct defines a connection's end point
+ * @sps_register - This struct defines a event registration parameters
+ */
+struct msm_hs_sps_ep_conn_data {
+	struct sps_pipe *pipe_handle;
+	struct sps_connect config;
+	struct sps_register_event event;
+};
+
+/* Per-port TX state: BAM consumer endpoint, kthread worker, flush state */
+struct msm_hs_tx {
+	bool dma_in_flight;    /* tx dma in progress */
+	enum flush_reason flush;
+	wait_queue_head_t wait;
+	int tx_count;
+	dma_addr_t dma_base;
+	struct kthread_work kwork;
+	struct kthread_worker kworker;
+	struct task_struct *task;
+	struct msm_hs_sps_ep_conn_data cons;
+	struct timer_list tx_timeout_timer;
+	void *ipc_tx_ctxt;	/* IPC log context for TX data dumps */
+};
+
+/* Per-port RX state: BAM producer endpoint and descriptor bookkeeping */
+struct msm_hs_rx {
+	enum flush_reason flush;
+	wait_queue_head_t wait;
+	dma_addr_t rbuffer;
+	unsigned char *buffer;
+	unsigned int buffer_pending;
+	struct delayed_work flip_insert_work;
+	struct kthread_work kwork;
+	struct kthread_worker kworker;
+	struct task_struct *task;
+	struct msm_hs_sps_ep_conn_data prod;
+	/* presumably per-descriptor bitmasks indexed like iovec[] — verify */
+	unsigned long queued_flag;
+	unsigned long pending_flag;
+	int rx_inx;
+	struct sps_iovec iovec[UART_DMA_DESC_NR]; /* track descriptors */
+	void *ipc_rx_ctxt;	/* IPC log context for RX data dumps */
+};
+/* Bitmask describing why received chars could not be pushed to the TTY */
+enum buffer_states {
+	NONE_PENDING = 0x0,
+	FIFO_OVERRUN = 0x1,
+	PARITY_ERROR = 0x2,
+	CHARS_NORMAL = 0x4,
+};
+
+/* Power state of the port as tracked by this driver's runtime PM hooks */
+enum msm_hs_pm_state {
+	MSM_HS_PM_ACTIVE,
+	MSM_HS_PM_SUSPENDED,
+	MSM_HS_PM_SYS_SUSPENDED,
+};
+
+/* optional low power wakeup, typically on a GPIO RX irq */
+struct msm_hs_wakeup {
+	int irq;  /* < 0 indicates low power wakeup disabled */
+	unsigned char ignore;  /* bool */
+
+	/* bool: inject char into rx tty on wakeup */
+	bool inject_rx;
+	unsigned char rx_to_inject;
+	bool enabled;
+	bool freed;
+};
+
+/* Driver state for one HSUART port; embeds the serial-core uart_port */
+struct msm_hs_port {
+	struct uart_port uport;
+	unsigned long imr_reg;  /* shadow value of UARTDM_IMR */
+	struct clk *clk;
+	struct clk *pclk;	/* iface clock; may be absent (NULL) */
+	struct msm_hs_tx tx;
+	struct msm_hs_rx rx;
+	atomic_t resource_count;	/* outstanding resource votes */
+	struct msm_hs_wakeup wakeup;
+
+	struct dentry *loopback_dir;
+	struct work_struct clock_off_w; /* work for actual clock off */
+	struct workqueue_struct *hsuart_wq; /* hsuart workqueue */
+	struct mutex mtx; /* resource access mutex */
+	enum uart_core_type uart_type;
+	unsigned long bam_handle;
+	resource_size_t bam_mem;
+	int bam_irq;
+	unsigned char __iomem *bam_base;
+	unsigned int bam_tx_ep_pipe_index;
+	unsigned int bam_rx_ep_pipe_index;
+	/* struct sps_event_notify is an argument passed when triggering a
+	 * callback event object registered for an SPS connection end point.
+	 */
+	struct sps_event_notify notify;
+	/* bus client handler */
+	u32 bus_perf_client;
+	/* BLSP UART required BUS Scaling data */
+	struct msm_bus_scale_pdata *bus_scale_table;
+	bool rx_bam_inprogress;
+	wait_queue_head_t bam_disconnect_wait;
+	bool use_pinctrl;
+	struct pinctrl *pinctrl;
+	struct pinctrl_state *gpio_state_active;
+	struct pinctrl_state *gpio_state_suspend;
+	bool flow_control;
+	enum msm_hs_pm_state pm_state;
+	atomic_t client_count;
+	bool obs; /* out of band sleep flag */
+	atomic_t client_req_state;
+	void *ipc_msm_hs_log_ctxt;
+	void *ipc_msm_hs_pwr_ctxt;
+	int ipc_debug_mask;	/* current verbosity, FATAL_LEV..DBG_LEV */
+};
+
+static const struct of_device_id msm_hs_match_table[] = {
+	{ .compatible = "qcom,msm-hsuart-v14"},
+	{}
+};
+
+
+#define MSM_UARTDM_BURST_SIZE 16   /* DM burst size (in bytes) */
+#define UARTDM_TX_BUF_SIZE UART_XMIT_SIZE
+#define UARTDM_RX_BUF_SIZE 512
+#define RETRY_TIMEOUT 5
+#define UARTDM_NR 256
+#define BAM_PIPE_MIN 0
+#define BAM_PIPE_MAX 11
+#define BUS_SCALING 1
+#define BUS_RESET 0
+#define RX_FLUSH_COMPLETE_TIMEOUT 300 /* In jiffies */
+#define BLSP_UART_CLK_FMAX 63160000
+
+static struct dentry *debug_base;
+static struct platform_driver msm_serial_hs_platform_driver;
+static struct uart_driver msm_hs_driver;
+static const struct uart_ops msm_hs_ops;
+static void msm_hs_start_rx_locked(struct uart_port *uport);
+static void msm_serial_hs_rx_work(struct kthread_work *work);
+static void flip_insert_work(struct work_struct *work);
+static void msm_hs_bus_voting(struct msm_hs_port *msm_uport, unsigned int vote);
+static struct msm_hs_port *msm_hs_get_hs_port(int port_index);
+static void msm_hs_queue_rx_desc(struct msm_hs_port *msm_uport);
+static int disconnect_rx_endpoint(struct msm_hs_port *msm_uport);
+static int msm_hs_pm_resume(struct device *dev);
+
+#define UARTDM_TO_MSM(uart_port) \
+	container_of((uart_port), struct msm_hs_port, uport)
+
+/*
+ * msm_hs_ioctl() - driver-specific ioctls for client clock control.
+ * @uport: serial-core port
+ * @cmd:   MSM_ENABLE_UART_CLOCK, MSM_DISABLE_UART_CLOCK or
+ *         MSM_GET_UART_CLOCK_STATUS
+ * @arg:   unused
+ *
+ * Returns 0/1 for the status query, the clock-request result for
+ * enable/disable, or -ENOIOCTLCMD for unknown commands.
+ */
+static int msm_hs_ioctl(struct uart_port *uport, unsigned int cmd,
+						unsigned long arg)
+{
+	int ret = 0, state = 1;
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+
+	if (!msm_uport)
+		return -ENODEV;
+
+	switch (cmd) {
+	case MSM_ENABLE_UART_CLOCK: {
+		ret = msm_hs_request_clock_on(&msm_uport->uport);
+		break;
+	}
+	case MSM_DISABLE_UART_CLOCK: {
+		ret = msm_hs_request_clock_off(&msm_uport->uport);
+		break;
+	}
+	case MSM_GET_UART_CLOCK_STATUS: {
+		/* Return value 0 - UART CLOCK is OFF
+		 * Return value 1 - UART CLOCK is ON
+		 */
+
+		if (msm_uport->pm_state != MSM_HS_PM_ACTIVE)
+			state = 0;
+		ret = state;
+		MSM_HS_INFO("%s():GET UART CLOCK STATUS: cmd=%d state=%d\n",
+			__func__, cmd, state);
+		break;
+	}
+	default: {
+		MSM_HS_INFO("%s():Unknown cmd specified: cmd=%d\n", __func__,
+			   cmd);
+		ret = -ENOIOCTLCMD;
+		break;
+	}
+	}
+
+	return ret;
+}
+
+/*
+ * This function is called initially during probe and then
+ * through the runtime PM framework. The function directly calls
+ * resource APIs to enable them.
+ */
+
+/* Vote for the bus, then enable iface (pclk) and core clocks in order.
+ * Returns 0 on success or the clk_prepare_enable() error; on failure the
+ * already-acquired resources are rolled back via the goto labels below.
+ */
+static int msm_hs_clk_bus_vote(struct msm_hs_port *msm_uport)
+{
+	int rc = 0;
+
+	msm_hs_bus_voting(msm_uport, BUS_SCALING);
+	/* Turn on core clk and iface clk */
+	if (msm_uport->pclk) {
+		rc = clk_prepare_enable(msm_uport->pclk);
+		if (rc) {
+			dev_err(msm_uport->uport.dev,
+				"%s: Could not turn on pclk [%d]\n",
+				__func__, rc);
+			goto busreset;
+		}
+	}
+	rc = clk_prepare_enable(msm_uport->clk);
+	if (rc) {
+		dev_err(msm_uport->uport.dev,
+			"%s: Could not turn on core clk [%d]\n",
+			__func__, rc);
+		goto core_unprepare;
+	}
+	MSM_HS_DBG("%s: Clock ON successful\n", __func__);
+	return rc;
+core_unprepare:
+	/* NOTE(review): despite the label name this unwinds pclk, not the
+	 * core clk; presumably safe when pclk is NULL since the common clk
+	 * framework no-ops on NULL — confirm for this kernel version.
+	 */
+	clk_disable_unprepare(msm_uport->pclk);
+busreset:
+	msm_hs_bus_voting(msm_uport, BUS_RESET);
+	return rc;
+}
+
+/*
+ * This function is called initially during probe and then
+ * through the runtime PM framework. The function directly calls
+ * resource apis to disable them.
+ */
+/* Reverse of msm_hs_clk_bus_vote(): disable core clk, then iface clk,
+ * then drop the bus vote.
+ */
+static void msm_hs_clk_bus_unvote(struct msm_hs_port *msm_uport)
+{
+	clk_disable_unprepare(msm_uport->clk);
+	if (msm_uport->pclk)
+		clk_disable_unprepare(msm_uport->pclk);
+	msm_hs_bus_voting(msm_uport, BUS_RESET);
+	MSM_HS_DBG("%s: Clock OFF successful\n", __func__);
+}
+
+ /* Remove vote for resources when done */
+/* Drops one resource vote taken by msm_hs_resource_vote() and releases
+ * the matching runtime-PM reference (autosuspend). Warns and bails if
+ * the count would go negative.
+ * NOTE(review): the read + dec of resource_count is not a single atomic
+ * operation; presumably callers serialize under-/over-vote cases — verify.
+ */
+static void msm_hs_resource_unvote(struct msm_hs_port *msm_uport)
+{
+	struct uart_port *uport = &(msm_uport->uport);
+	int rc = atomic_read(&msm_uport->resource_count);
+
+	MSM_HS_DBG("%s(): power usage count %d", __func__, rc);
+	if (rc <= 0) {
+		MSM_HS_WARN("%s(): rc zero, bailing\n", __func__);
+		WARN_ON(1);
+		return;
+	}
+	atomic_dec(&msm_uport->resource_count);
+	pm_runtime_mark_last_busy(uport->dev);
+	pm_runtime_put_autosuspend(uport->dev);
+}
+
+ /* Vote for resources before accessing them */
+/* Takes a runtime-PM reference and bumps resource_count. If the runtime
+ * resume callback was not invoked (error or PM state still not ACTIVE),
+ * falls back to calling msm_hs_pm_resume() directly so register access
+ * that follows is safe.
+ */
+static void msm_hs_resource_vote(struct msm_hs_port *msm_uport)
+{
+	int ret;
+	struct uart_port *uport = &(msm_uport->uport);
+
+	ret = pm_runtime_get_sync(uport->dev);
+	if (ret < 0 || msm_uport->pm_state != MSM_HS_PM_ACTIVE) {
+		MSM_HS_WARN("%s:%s runtime callback not invoked ret:%d st:%d",
+			__func__, dev_name(uport->dev), ret,
+					msm_uport->pm_state);
+		msm_hs_pm_resume(uport->dev);
+	}
+	atomic_inc(&msm_uport->resource_count);
+}
+
+/* Check if the uport line number matches with user id stored in pdata.
+ * User id information is stored during initialization. This function
+ * ensues that the same device is selected
+ *
+ * Fix: the original dereferenced pdata->userid without checking that
+ * pdev->dev.platform_data is non-NULL, so a device without platform
+ * data could oops on the mismatch path. Treat a missing pdata as
+ * "no userid to match against".
+ *
+ * Returns the matching msm_hs_port, or NULL (with a WARN) on mismatch.
+ */
+static struct msm_hs_port *get_matching_hs_port(struct platform_device *pdev)
+{
+	struct msm_serial_hs_platform_data *pdata = pdev->dev.platform_data;
+	struct msm_hs_port *msm_uport = msm_hs_get_hs_port(pdev->id);
+
+	if (!msm_uport || (msm_uport->uport.line != pdev->id &&
+	   (!pdata || msm_uport->uport.line != pdata->userid))) {
+		pr_err("uport line number mismatch!");
+		WARN_ON(1);
+		return NULL;
+	}
+
+	return msm_uport;
+}
+
+/* sysfs 'clock' show handler: prints 1 if the port clocks are on
+ * (PM state ACTIVE), 0 otherwise.
+ */
+static ssize_t show_clock(struct device *dev, struct device_attribute *attr,
+			  char *buf)
+{
+	int state = 1;
+	ssize_t ret = 0;
+	struct platform_device *pdev = container_of(dev, struct
+						    platform_device, dev);
+	struct msm_hs_port *msm_uport = get_matching_hs_port(pdev);
+
+	/* This check should not fail */
+	if (msm_uport) {
+		if (msm_uport->pm_state != MSM_HS_PM_ACTIVE)
+			state = 0;
+		ret = snprintf(buf, PAGE_SIZE, "%d\n", state);
+	}
+	return ret;
+}
+
+/* sysfs 'clock' store handler: writing '0' requests clock off, '1'
+ * requests clock on. Only the first character is examined; anything
+ * else yields -EINVAL.
+ */
+static ssize_t set_clock(struct device *dev, struct device_attribute *attr,
+			 const char *buf, size_t count)
+{
+	int state;
+	ssize_t ret = 0;
+	struct platform_device *pdev = container_of(dev, struct
+						    platform_device, dev);
+	struct msm_hs_port *msm_uport = get_matching_hs_port(pdev);
+
+	/* This check should not fail */
+	if (msm_uport) {
+		state = buf[0] - '0';
+		switch (state) {
+		case 0:
+			MSM_HS_DBG("%s: Request clock OFF\n", __func__);
+			msm_hs_request_clock_off(&msm_uport->uport);
+			ret = count;
+			break;
+		case 1:
+			MSM_HS_DBG("%s: Request clock ON\n", __func__);
+			msm_hs_request_clock_on(&msm_uport->uport);
+			ret = count;
+			break;
+		default:
+			ret = -EINVAL;
+		}
+	}
+	return ret;
+}
+
+static DEVICE_ATTR(clock, 0644, show_clock, set_clock);
+
+/* sysfs 'debug_mask' show handler: prints the current IPC log level.
+ *
+ * Fix: the buffer bound passed to snprintf() was sizeof(int) (4 bytes),
+ * which truncates the output and bears no relation to the destination;
+ * sysfs show() buffers are PAGE_SIZE, as show_clock() above already
+ * uses. Also print the signed int with %d rather than %u.
+ */
+static ssize_t show_debug_mask(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	ssize_t ret = 0;
+	struct platform_device *pdev = container_of(dev, struct
+						    platform_device, dev);
+	struct msm_hs_port *msm_uport = get_matching_hs_port(pdev);
+
+	/* This check should not fail */
+	if (msm_uport)
+		ret = snprintf(buf, PAGE_SIZE, "%d\n",
+					msm_uport->ipc_debug_mask);
+	return ret;
+}
+
+/* sysfs 'debug_mask' store handler: sets the IPC log level from the
+ * first character of the input ('0'..'4'). Out-of-range input resets
+ * the mask to INFO_LEV and returns -EINVAL. Note the mask is assigned
+ * before validation, so an invalid write still momentarily stores the
+ * bad value.
+ */
+static ssize_t set_debug_mask(struct device *dev,
+			struct device_attribute *attr,
+			const char *buf, size_t count)
+{
+	struct platform_device *pdev = container_of(dev, struct
+						    platform_device, dev);
+	struct msm_hs_port *msm_uport = get_matching_hs_port(pdev);
+
+	/* This check should not fail */
+	if (msm_uport) {
+		msm_uport->ipc_debug_mask = buf[0] - '0';
+		if (msm_uport->ipc_debug_mask < FATAL_LEV ||
+				msm_uport->ipc_debug_mask > DBG_LEV) {
+			/* set to default level */
+			msm_uport->ipc_debug_mask = INFO_LEV;
+			MSM_HS_ERR("Range is 0 to 4;Set to default level 3\n");
+			return -EINVAL;
+		}
+	}
+	return count;
+}
+
+static DEVICE_ATTR(debug_mask, 0644, show_debug_mask,
+							set_debug_mask);
+
+/* True if an RX-wakeup GPIO irq was configured for this port */
+static inline bool is_use_low_power_wakeup(struct msm_hs_port *msm_uport)
+{
+	return msm_uport->wakeup.irq > 0;
+}
+
+/* Update the bus-bandwidth vote for this port (BUS_SCALING to vote,
+ * BUS_RESET to drop). No-op when no bus perf client was registered.
+ */
+static void msm_hs_bus_voting(struct msm_hs_port *msm_uport, unsigned int vote)
+{
+	int ret;
+
+	if (msm_uport->bus_perf_client) {
+		MSM_HS_DBG("Bus voting:%d\n", vote);
+		ret = msm_bus_scale_client_update_request(
+				msm_uport->bus_perf_client, vote);
+		if (ret)
+			MSM_HS_ERR("%s(): Failed for Bus voting: %d\n",
+							__func__, vote);
+	}
+}
+
+/* Relaxed (no barrier) read of a UARTDM register at byte offset @index.
+ * Callers that need ordering against other IO issue mb() themselves.
+ */
+static inline unsigned int msm_hs_read(struct uart_port *uport,
+				       unsigned int index)
+{
+	return readl_relaxed(uport->membase + index);
+}
+
+/* Relaxed (no barrier) write of a UARTDM register; see msm_hs_read() */
+static inline void msm_hs_write(struct uart_port *uport, unsigned int index,
+				 unsigned int value)
+{
+	writel_relaxed(value, uport->membase + index);
+}
+
+/* Disconnect an SPS RX pipe. The pipe is first switched to polling mode
+ * (SPS_O_POLL) so no further event callbacks fire during teardown, then
+ * disconnected. Returns 0 or a negative SPS error.
+ */
+static int sps_rx_disconnect(struct sps_pipe *sps_pipe_handler)
+{
+	struct sps_connect config;
+	int ret;
+
+	ret = sps_get_config(sps_pipe_handler, &config);
+	if (ret) {
+		pr_err("%s: sps_get_config() failed ret %d\n", __func__, ret);
+		return ret;
+	}
+	config.options |= SPS_O_POLL;
+	ret = sps_set_config(sps_pipe_handler, &config);
+	if (ret) {
+		pr_err("%s: sps_set_config() failed ret %d\n", __func__, ret);
+		return ret;
+	}
+	return sps_disconnect(sps_pipe_handler);
+}
+
+/* Hex-dump up to BUF_DUMP_SIZE bytes of @string into the IPC log
+ * @ipc_ctx, tagged with @prefix, the buffer address and full @size.
+ * Note: @msm_uport is currently unused by the body.
+ */
+static void hex_dump_ipc(struct msm_hs_port *msm_uport, void *ipc_ctx,
+			char *prefix, char *string, u64 addr, int size)
+
+{
+	/* 3 chars per byte ("xx ") plus slack for the terminator */
+	char buf[(BUF_DUMP_SIZE * 3) + 2];
+	int len = 0;
+
+	len = min(size, BUF_DUMP_SIZE);
+	/*
+	 * Print upto 32 data bytes, 32 bytes per line, 1 byte at a time and
+	 * don't include the ASCII text at the end of the buffer.
+	 */
+	hex_dump_to_buffer(string, len, 32, 1, buf, sizeof(buf), false);
+	ipc_log_string(ipc_ctx, "%s[0x%.10x:%d] : %s", prefix,
+					(unsigned int)addr, size, buf);
+}
+
+/*
+ * This API read and provides UART Core registers information.
+ */
+/* Dumps the main UARTDM registers to the IPC log. Bails out early when
+ * the port is not in the ACTIVE PM state, since reading registers with
+ * the clocks off would be unsafe.
+ */
+static void dump_uart_hs_registers(struct msm_hs_port *msm_uport)
+{
+	struct uart_port *uport = &(msm_uport->uport);
+
+	if (msm_uport->pm_state != MSM_HS_PM_ACTIVE) {
+		MSM_HS_INFO("%s:Failed clocks are off, resource_count %d",
+			__func__, atomic_read(&msm_uport->resource_count));
+		return;
+	}
+
+	MSM_HS_DBG(
+	"MR1:%x MR2:%x TFWR:%x RFWR:%x DMEN:%x IMR:%x MISR:%x NCF_TX:%x\n",
+	msm_hs_read(uport, UART_DM_MR1),
+	msm_hs_read(uport, UART_DM_MR2),
+	msm_hs_read(uport, UART_DM_TFWR),
+	msm_hs_read(uport, UART_DM_RFWR),
+	msm_hs_read(uport, UART_DM_DMEN),
+	msm_hs_read(uport, UART_DM_IMR),
+	msm_hs_read(uport, UART_DM_MISR),
+	msm_hs_read(uport, UART_DM_NCF_TX));
+	MSM_HS_INFO("SR:%x ISR:%x DMRX:%x RX_SNAP:%x TXFS:%x RXFS:%x\n",
+	msm_hs_read(uport, UART_DM_SR),
+	msm_hs_read(uport, UART_DM_ISR),
+	msm_hs_read(uport, UART_DM_DMRX),
+	msm_hs_read(uport, UART_DM_RX_TOTAL_SNAP),
+	msm_hs_read(uport, UART_DM_TXFS),
+	msm_hs_read(uport, UART_DM_RXFS));
+	MSM_HS_DBG("rx.flush:%u\n", msm_uport->rx.flush);
+}
+
+/* Debugfs "loopback" write handler: enable (@val non-zero) or disable
+ * (@val zero) HW internal loopback by setting/clearing the loop-mode
+ * bits in UART_DM_MR2. Always returns 0.
+ *
+ * Cleanup: the original duplicated the vote/lock/read/write scaffolding
+ * in both branches, differing only in set vs. clear of the same bits;
+ * the register update order and locking are unchanged.
+ */
+static int msm_serial_loopback_enable_set(void *data, u64 val)
+{
+	struct msm_hs_port *msm_uport = data;
+	struct uart_port *uport = &(msm_uport->uport);
+	unsigned long flags;
+	int ret = 0;
+
+	msm_hs_resource_vote(msm_uport);
+
+	spin_lock_irqsave(&uport->lock, flags);
+	ret = msm_hs_read(uport, UART_DM_MR2);
+	if (val)
+		ret |= (UARTDM_MR2_LOOP_MODE_BMSK |
+			UARTDM_MR2_RFR_CTS_LOOP_MODE_BMSK);
+	else
+		ret &= ~(UARTDM_MR2_LOOP_MODE_BMSK |
+			UARTDM_MR2_RFR_CTS_LOOP_MODE_BMSK);
+	msm_hs_write(uport, UART_DM_MR2, ret);
+	spin_unlock_irqrestore(&uport->lock, flags);
+	/* Calling CLOCK API. Hence mb() requires here. */
+	mb();
+
+	msm_hs_resource_unvote(msm_uport);
+	return 0;
+}
+
+/* Debugfs "loopback" read handler: reports 1 in *@val when the MR2
+ * loop-mode bit is set, 0 otherwise. Always returns 0.
+ */
+static int msm_serial_loopback_enable_get(void *data, u64 *val)
+{
+	struct msm_hs_port *msm_uport = data;
+	struct uart_port *uport = &(msm_uport->uport);
+	unsigned long flags;
+	int ret = 0;
+
+	msm_hs_resource_vote(msm_uport);
+
+	spin_lock_irqsave(&uport->lock, flags);
+	ret = msm_hs_read(&msm_uport->uport, UART_DM_MR2);
+	spin_unlock_irqrestore(&uport->lock, flags);
+
+	msm_hs_resource_unvote(msm_uport);
+
+	*val = (ret & UARTDM_MR2_LOOP_MODE_BMSK) ? 1 : 0;
+
+	return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(loopback_enable_fops, msm_serial_loopback_enable_get,
+			msm_serial_loopback_enable_set, "%llu\n");
+
+/*
+ * msm_serial_hs debugfs node: <debugfs_root>/msm_serial_hs/loopback.<id>
+ * writing 1 turns on internal loopback mode in HW. Useful for automation
+ * test scripts.
+ * writing 0 disables the internal loopback mode. Default is disabled.
+ */
+/* Creates the per-port loopback.<id> debugfs file under debug_base.
+ * Failure is logged but non-fatal.
+ */
+static void msm_serial_debugfs_init(struct msm_hs_port *msm_uport,
+					   int id)
+{
+	char node_name[15];
+
+	snprintf(node_name, sizeof(node_name), "loopback.%d", id);
+	msm_uport->loopback_dir = debugfs_create_file(node_name,
+						0644,
+						debug_base,
+						msm_uport,
+						&loopback_enable_fops);
+
+	if (IS_ERR_OR_NULL(msm_uport->loopback_dir))
+		MSM_HS_ERR("%s(): Cannot create loopback.%d debug entry",
+							__func__, id);
+}
+
+/* Platform-driver remove: tears down sysfs/debugfs entries, frees the
+ * RX DMA buffer, destroys the workqueue, unregisters the uart port and
+ * releases clocks and the register mapping.
+ * NOTE(review): the RX DMA buffer is freed before uart_remove_one_port();
+ * presumably the port is already shut down at this point — verify against
+ * the shutdown path.
+ */
+static int msm_hs_remove(struct platform_device *pdev)
+{
+
+	struct msm_hs_port *msm_uport;
+	struct device *dev;
+
+	if (pdev->id < 0 || pdev->id >= UARTDM_NR) {
+		pr_err("Invalid plaform device ID = %d\n", pdev->id);
+		return -EINVAL;
+	}
+
+	msm_uport = get_matching_hs_port(pdev);
+	if (!msm_uport)
+		return -EINVAL;
+
+	dev = msm_uport->uport.dev;
+	sysfs_remove_file(&pdev->dev.kobj, &dev_attr_clock.attr);
+	sysfs_remove_file(&pdev->dev.kobj, &dev_attr_debug_mask.attr);
+	debugfs_remove(msm_uport->loopback_dir);
+
+	dma_free_coherent(msm_uport->uport.dev,
+			UART_DMA_DESC_NR * UARTDM_RX_BUF_SIZE,
+			msm_uport->rx.buffer, msm_uport->rx.rbuffer);
+
+	msm_uport->rx.buffer = NULL;
+	msm_uport->rx.rbuffer = 0;
+
+	destroy_workqueue(msm_uport->hsuart_wq);
+	mutex_destroy(&msm_uport->mtx);
+
+	uart_remove_one_port(&msm_hs_driver, &msm_uport->uport);
+	clk_put(msm_uport->clk);
+	if (msm_uport->pclk)
+		clk_put(msm_uport->pclk);
+
+	iounmap(msm_uport->uport.membase);
+
+	return 0;
+}
+
+
+/* Connect a UART peripheral's SPS endpoint(consumer endpoint)
+ *
+ * Also registers a SPS callback function for the consumer
+ * process with the SPS driver
+ *
+ * @uport - Pointer to uart uport structure
+ *
+ * @return - 0 if successful else negative value.
+ *
+ * Fix: the error message for a failing sps_register_event() was a
+ * copy-paste of the sps_connect() message, making log triage of TX
+ * connect failures misleading.
+ */
+
+static int msm_hs_spsconnect_tx(struct msm_hs_port *msm_uport)
+{
+	int ret;
+	struct uart_port *uport = &msm_uport->uport;
+	struct msm_hs_tx *tx = &msm_uport->tx;
+	struct sps_pipe *sps_pipe_handle = tx->cons.pipe_handle;
+	struct sps_connect *sps_config = &tx->cons.config;
+	struct sps_register_event *sps_event = &tx->cons.event;
+	unsigned long flags;
+	unsigned int data;
+
+	if (tx->flush != FLUSH_SHUTDOWN) {
+		MSM_HS_ERR("%s:Invalid flush state:%d\n", __func__, tx->flush);
+		return 0;
+	}
+
+	/* Establish connection between peripheral and memory endpoint */
+	ret = sps_connect(sps_pipe_handle, sps_config);
+	if (ret) {
+		MSM_HS_ERR("msm_serial_hs: sps_connect() failed for tx!!\n"
+		"pipe_handle=0x%p ret=%d", sps_pipe_handle, ret);
+		return ret;
+	}
+	/* Register callback event for EOT (End of transfer) event. */
+	ret = sps_register_event(sps_pipe_handle, sps_event);
+	if (ret) {
+		MSM_HS_ERR("msm_serial_hs: sps_register_event() failed for tx!!\n"
+		"pipe_handle=0x%p ret=%d", sps_pipe_handle, ret);
+		goto reg_event_err;
+	}
+
+	spin_lock_irqsave(&(msm_uport->uport.lock), flags);
+	msm_uport->tx.flush = FLUSH_STOP;
+	spin_unlock_irqrestore(&(msm_uport->uport.lock), flags);
+
+	data = msm_hs_read(uport, UART_DM_DMEN);
+	/* Enable UARTDM Tx BAM Interface */
+	data |= UARTDM_TX_BAM_ENABLE_BMSK;
+	msm_hs_write(uport, UART_DM_DMEN, data);
+
+	msm_hs_write(uport, UART_DM_CR, RESET_TX);
+	msm_hs_write(uport, UART_DM_CR, START_TX_BAM_IFC);
+	msm_hs_write(uport, UART_DM_CR, UARTDM_CR_TX_EN_BMSK);
+
+	MSM_HS_DBG("%s(): TX Connect", __func__);
+	return 0;
+
+reg_event_err:
+	sps_disconnect(sps_pipe_handle);
+	return ret;
+}
+
+/* Connect a UART peripheral's SPS endpoint(producer endpoint)
+ *
+ * Also registers a SPS callback function for the producer
+ * process with the SPS driver
+ *
+ * @uport - Pointer to uart uport structure
+ *
+ * @return - 0 if successful else negative value.
+ *
+ * Fix: the error message for a failing sps_register_event() was a
+ * copy-paste of the sps_connect() message, making log triage of RX
+ * connect failures misleading.
+ */
+
+static int msm_hs_spsconnect_rx(struct uart_port *uport)
+{
+	int ret;
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+	struct msm_hs_rx *rx = &msm_uport->rx;
+	struct sps_pipe *sps_pipe_handle = rx->prod.pipe_handle;
+	struct sps_connect *sps_config = &rx->prod.config;
+	struct sps_register_event *sps_event = &rx->prod.event;
+	unsigned long flags;
+
+	/* Establish connection between peripheral and memory endpoint */
+	ret = sps_connect(sps_pipe_handle, sps_config);
+	if (ret) {
+		MSM_HS_ERR("msm_serial_hs: sps_connect() failed for rx!!\n"
+		"pipe_handle=0x%p ret=%d", sps_pipe_handle, ret);
+		return ret;
+	}
+	/* Register callback event for DESC_DONE event. */
+	ret = sps_register_event(sps_pipe_handle, sps_event);
+	if (ret) {
+		MSM_HS_ERR("msm_serial_hs: sps_register_event() failed for rx!!\n"
+		"pipe_handle=0x%p ret=%d", sps_pipe_handle, ret);
+		goto reg_event_err;
+	}
+	spin_lock_irqsave(&uport->lock, flags);
+	if (msm_uport->rx.pending_flag)
+		MSM_HS_WARN("%s(): Buffers may be pending 0x%lx",
+		__func__, msm_uport->rx.pending_flag);
+	msm_uport->rx.queued_flag = 0;
+	msm_uport->rx.pending_flag = 0;
+	msm_uport->rx.rx_inx = 0;
+	msm_uport->rx.flush = FLUSH_STOP;
+	spin_unlock_irqrestore(&uport->lock, flags);
+	MSM_HS_DBG("%s(): RX Connect\n", __func__);
+	return 0;
+
+reg_event_err:
+	sps_disconnect(sps_pipe_handle);
+	return ret;
+}
+
+/*
+ * programs the UARTDM_CSR register with correct bit rates
+ *
+ * Interrupts should be disabled before we are called, as
+ * we modify Set Baud rate
+ * Set receive stale interrupt level, dependent on Bit Rate
+ * Goal is to have around 8 ms before indicate stale.
+ * roundup (((Bit Rate * .008) / 10) + 1
+ */
+static void msm_hs_set_bps_locked(struct uart_port *uport,
+			       unsigned int bps)
+{
+	unsigned long rxstale;
+	unsigned long data;
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+
+	/* Each CSR value selects the RX (high nibble) and TX (low nibble)
+	 * clock divider for the requested rate; presumably per the UARTDM
+	 * HW programming guide — values are target-specific.
+	 */
+	switch (bps) {
+	case 300:
+		msm_hs_write(uport, UART_DM_CSR, 0x00);
+		rxstale = 1;
+		break;
+	case 600:
+		msm_hs_write(uport, UART_DM_CSR, 0x11);
+		rxstale = 1;
+		break;
+	case 1200:
+		msm_hs_write(uport, UART_DM_CSR, 0x22);
+		rxstale = 1;
+		break;
+	case 2400:
+		msm_hs_write(uport, UART_DM_CSR, 0x33);
+		rxstale = 1;
+		break;
+	case 4800:
+		msm_hs_write(uport, UART_DM_CSR, 0x44);
+		rxstale = 1;
+		break;
+	case 9600:
+		msm_hs_write(uport, UART_DM_CSR, 0x55);
+		rxstale = 2;
+		break;
+	case 14400:
+		msm_hs_write(uport, UART_DM_CSR, 0x66);
+		rxstale = 3;
+		break;
+	case 19200:
+		msm_hs_write(uport, UART_DM_CSR, 0x77);
+		rxstale = 4;
+		break;
+	case 28800:
+		msm_hs_write(uport, UART_DM_CSR, 0x88);
+		rxstale = 6;
+		break;
+	case 38400:
+		msm_hs_write(uport, UART_DM_CSR, 0x99);
+		rxstale = 8;
+		break;
+	case 57600:
+		msm_hs_write(uport, UART_DM_CSR, 0xaa);
+		rxstale = 16;
+		break;
+	case 76800:
+		msm_hs_write(uport, UART_DM_CSR, 0xbb);
+		rxstale = 16;
+		break;
+	case 115200:
+		msm_hs_write(uport, UART_DM_CSR, 0xcc);
+		rxstale = 31;
+		break;
+	case 230400:
+		msm_hs_write(uport, UART_DM_CSR, 0xee);
+		rxstale = 31;
+		break;
+	case 460800:
+		msm_hs_write(uport, UART_DM_CSR, 0xff);
+		rxstale = 31;
+		break;
+	/* Rates above 460800 all use divider 0xff and are achieved by
+	 * raising the input clock (uartclk = bps * 16) below.
+	 */
+	case 4000000:
+	case 3686400:
+	case 3200000:
+	case 3500000:
+	case 3000000:
+	case 2500000:
+	case 2000000:
+	case 1500000:
+	case 1152000:
+	case 1000000:
+	case 921600:
+		msm_hs_write(uport, UART_DM_CSR, 0xff);
+		rxstale = 31;
+		break;
+	default:
+		msm_hs_write(uport, UART_DM_CSR, 0xff);
+		/* default to 9600 */
+		bps = 9600;
+		rxstale = 2;
+		break;
+	}
+	/*
+	 * uart baud rate depends on CSR and MND Values
+	 * we are updating CSR before and then calling
+	 * clk_set_rate which updates MND Values. Hence
+	 * dsb requires here.
+	 */
+	mb();
+	if (bps > 460800) {
+		uport->uartclk = bps * 16;
+		/* BLSP based UART supports maximum clock frequency
+		 * of 63.16 Mhz. With this (63.16 Mhz) clock frequency
+		 * UART can support baud rate of 3.94 Mbps which is
+		 * equivalent to 4 Mbps.
+		 * UART hardware is robust enough to handle this
+		 * deviation to achieve baud rate ~4 Mbps.
+		 */
+		if (bps == 4000000)
+			uport->uartclk = BLSP_UART_CLK_FMAX;
+	} else {
+		uport->uartclk = 7372800;
+	}
+
+	if (clk_set_rate(msm_uport->clk, uport->uartclk)) {
+		MSM_HS_WARN("Error setting clock rate on UART\n");
+		WARN_ON(1);
+	}
+
+	/* Program the stale-interrupt timeout (split across LSB and MSB
+	 * fields of the IPR register).
+	 */
+	data = rxstale & UARTDM_IPR_STALE_LSB_BMSK;
+	data |= UARTDM_IPR_STALE_TIMEOUT_MSB_BMSK & (rxstale << 2);
+
+	msm_hs_write(uport, UART_DM_IPR, data);
+	/*
+	 * It is suggested to do reset of transmitter and receiver after
+	 * changing any protocol configuration. Here Baud rate and stale
+	 * timeout are getting updated. Hence reset transmitter and receiver.
+	 */
+	msm_hs_write(uport, UART_DM_CR, RESET_TX);
+	msm_hs_write(uport, UART_DM_CR, RESET_RX);
+}
+
+
+/* Variant of msm_hs_set_bps_locked() used when the core clock rate is
+ * unavailable (clk_get_rate() returned 0): programs CSR dividers for the
+ * standard 7.3728 MHz reference rates only and does not touch the clock.
+ * Unsupported rates fall back to 9600.
+ */
+static void msm_hs_set_std_bps_locked(struct uart_port *uport,
+			       unsigned int bps)
+{
+	unsigned long rxstale;
+	unsigned long data;
+
+	switch (bps) {
+	case 9600:
+		msm_hs_write(uport, UART_DM_CSR, 0x99);
+		rxstale = 2;
+		break;
+	case 14400:
+		msm_hs_write(uport, UART_DM_CSR, 0xaa);
+		rxstale = 3;
+		break;
+	case 19200:
+		msm_hs_write(uport, UART_DM_CSR, 0xbb);
+		rxstale = 4;
+		break;
+	case 28800:
+		msm_hs_write(uport, UART_DM_CSR, 0xcc);
+		rxstale = 6;
+		break;
+	case 38400:
+		msm_hs_write(uport, UART_DM_CSR, 0xdd);
+		rxstale = 8;
+		break;
+	case 57600:
+		msm_hs_write(uport, UART_DM_CSR, 0xee);
+		rxstale = 16;
+		break;
+	case 115200:
+		msm_hs_write(uport, UART_DM_CSR, 0xff);
+		rxstale = 31;
+		break;
+	default:
+		msm_hs_write(uport, UART_DM_CSR, 0x99);
+		/* default to 9600 */
+		bps = 9600;
+		rxstale = 2;
+		break;
+	}
+
+	/* Stale-interrupt timeout, split across IPR LSB/MSB fields */
+	data = rxstale & UARTDM_IPR_STALE_LSB_BMSK;
+	data |= UARTDM_IPR_STALE_TIMEOUT_MSB_BMSK & (rxstale << 2);
+
+	msm_hs_write(uport, UART_DM_IPR, data);
+}
+
+/* Re-enable HW RX flow control: assert RFR and turn auto-RFR back on.
+ * Acts only when flow control is configured for the port, unless
+ * @override forces it.
+ */
+static void msm_hs_enable_flow_control(struct uart_port *uport, bool override)
+{
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+	unsigned int data;
+
+	if (msm_uport->flow_control || override) {
+		/* Enable RFR line */
+		msm_hs_write(uport, UART_DM_CR, RFR_LOW);
+		/* Enable auto RFR */
+		data = msm_hs_read(uport, UART_DM_MR1);
+		data |= UARTDM_MR1_RX_RDY_CTL_BMSK;
+		msm_hs_write(uport, UART_DM_MR1, data);
+		/* Ensure register IO completion */
+		mb();
+	}
+}
+
+/* Disable HW RX flow control: turn off auto-RFR and deassert RFR so the
+ * remote side stops transmitting while port parameters are changed.
+ * Counterpart of msm_hs_enable_flow_control(); same @override semantics.
+ */
+static void msm_hs_disable_flow_control(struct uart_port *uport, bool override)
+{
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+	unsigned int data;
+
+	/*
+	 * Clear the Rx Ready Ctl bit - This ensures that
+	 * flow control lines stop the other side from sending
+	 * data while we change the parameters
+	 */
+
+	if (msm_uport->flow_control || override) {
+		data = msm_hs_read(uport, UART_DM_MR1);
+		/* disable auto ready-for-receiving */
+		data &= ~UARTDM_MR1_RX_RDY_CTL_BMSK;
+		msm_hs_write(uport, UART_DM_MR1, data);
+		/* Disable RFR line */
+		msm_hs_write(uport, UART_DM_CR, RFR_HIGH);
+		/* Ensure register IO completion */
+		mb();
+	}
+}
+
+/*
+ * msm_hs_set_termios() - standard uart_ops hook; configure the port.
+ * @uport:      port being reconfigured
+ * @termios:    new ktermios to apply
+ * @oldtermios: previous ktermios setting (may be used for baud fallback)
+ *
+ * Applies baud rate, parity, character size, stop bits and HW flow
+ * control.  The Rx BAM interface is disabled around the change to avoid
+ * a DMA Rx stall, then re-enabled and restarted.
+ */
+static void msm_hs_set_termios(struct uart_port *uport,
+				   struct ktermios *termios,
+				   struct ktermios *oldtermios)
+{
+	unsigned int bps;
+	unsigned long data;
+	unsigned int c_cflag = termios->c_cflag;
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+
+	/**
+	 * set_termios can be invoked from the framework when
+	 * the clocks are off and the client has not had a chance
+	 * to turn them on. Make sure that they are on
+	 */
+	msm_hs_resource_vote(msm_uport);
+	mutex_lock(&msm_uport->mtx);
+	/* Mask all UARTDM interrupts while reprogramming the port */
+	msm_hs_write(uport, UART_DM_IMR, 0);
+
+	/* Force RFR high so the remote stops sending during the change */
+	msm_hs_disable_flow_control(uport, true);
+
+	/*
+	 * Disable Rx channel of UARTDM
+	 * DMA Rx Stall happens if enqueue and flush of Rx command happens
+	 * concurrently. Hence before changing the baud rate/protocol
+	 * configuration and sending flush command to ADM, disable the Rx
+	 * channel of UARTDM.
+	 * Note: should not reset the receiver here immediately as it is not
+	 * suggested to do disable/reset or reset/disable at the same time.
+	 */
+	data = msm_hs_read(uport, UART_DM_DMEN);
+	/* Disable UARTDM RX BAM Interface */
+	data &= ~UARTDM_RX_BAM_ENABLE_BMSK;
+	msm_hs_write(uport, UART_DM_DMEN, data);
+
+	/*
+	 * Reset RX and TX.
+	 * Resetting the RX enables it, therefore we must reset and disable.
+	 */
+	msm_hs_write(uport, UART_DM_CR, RESET_RX);
+	msm_hs_write(uport, UART_DM_CR, UARTDM_CR_RX_DISABLE_BMSK);
+	msm_hs_write(uport, UART_DM_CR, RESET_TX);
+
+	/* 200 is the minimum baud accepted here; it is remapped below */
+	bps = uart_get_baud_rate(uport, termios, oldtermios, 200, 4000000);
+
+	/* Temporary remapping  200 BAUD to 3.2 mbps */
+	if (bps == 200)
+		bps = 3200000;
+
+	uport->uartclk = clk_get_rate(msm_uport->clk);
+	if (!uport->uartclk)
+		msm_hs_set_std_bps_locked(uport, bps);
+	else
+		msm_hs_set_bps_locked(uport, bps);
+
+	data = msm_hs_read(uport, UART_DM_MR2);
+	data &= ~UARTDM_MR2_PARITY_MODE_BMSK;
+	/* set parity */
+	if (c_cflag & PARENB) {
+		if (c_cflag & PARODD)
+			data |= ODD_PARITY;
+		else if (c_cflag & CMSPAR)
+			data |= SPACE_PARITY;
+		else
+			data |= EVEN_PARITY;
+	}
+
+	/* Set bits per char */
+	data &= ~UARTDM_MR2_BITS_PER_CHAR_BMSK;
+
+	switch (c_cflag & CSIZE) {
+	case CS5:
+		data |= FIVE_BPC;
+		break;
+	case CS6:
+		data |= SIX_BPC;
+		break;
+	case CS7:
+		data |= SEVEN_BPC;
+		break;
+	default:
+		data |= EIGHT_BPC;
+		break;
+	}
+	/* stop bits */
+	if (c_cflag & CSTOPB) {
+		data |= STOP_BIT_TWO;
+	} else {
+		/* otherwise 1 stop bit */
+		data |= STOP_BIT_ONE;
+	}
+	data |= UARTDM_MR2_ERROR_MODE_BMSK;
+	/* write parity/bits per char/stop bit configuration */
+	msm_hs_write(uport, UART_DM_MR2, data);
+
+	/*
+	 * NOTE(review): driver-local convention — raw termios flag bits are
+	 * stashed in ignore_status_mask/read_status_mask and tested against
+	 * INPCK/IGNPAR/IGNBRK/CREAD in the Rx work path, not the usual
+	 * serial-core UARTDM status-bit masks.
+	 */
+	uport->ignore_status_mask = termios->c_iflag & INPCK;
+	uport->ignore_status_mask |= termios->c_iflag & IGNPAR;
+	uport->ignore_status_mask |= termios->c_iflag & IGNBRK;
+
+	uport->read_status_mask = (termios->c_cflag & CREAD);
+
+	/* Set Transmit software time out */
+	uart_update_timeout(uport, c_cflag, bps);
+
+	/* Enable UARTDM Rx BAM Interface */
+	data = msm_hs_read(uport, UART_DM_DMEN);
+	data |= UARTDM_RX_BAM_ENABLE_BMSK;
+	msm_hs_write(uport, UART_DM_DMEN, data);
+	msm_hs_write(uport, UART_DM_CR, UARTDM_CR_RX_EN_BMSK);
+	/* Issue TX,RX BAM Start IFC command */
+	msm_hs_write(uport, UART_DM_CR, START_TX_BAM_IFC);
+	msm_hs_write(uport, UART_DM_CR, START_RX_BAM_IFC);
+	/* Ensure Register Writes Complete */
+	mb();
+
+	/* Configure HW flow control
+	 * UART Core would see status of CTS line when it is sending data
+	 * to remote uart to confirm that it can receive or not.
+	 * UART Core would trigger RFR if it is not having any space with
+	 * RX FIFO.
+	 */
+	/* Pulling RFR line high */
+	msm_hs_write(uport, UART_DM_CR, RFR_LOW);
+	data = msm_hs_read(uport, UART_DM_MR1);
+	data &= ~(UARTDM_MR1_CTS_CTL_BMSK | UARTDM_MR1_RX_RDY_CTL_BMSK);
+	if (c_cflag & CRTSCTS) {
+		data |= UARTDM_MR1_CTS_CTL_BMSK;
+		data |= UARTDM_MR1_RX_RDY_CTL_BMSK;
+		msm_uport->flow_control = true;
+	}
+	msm_hs_write(uport, UART_DM_MR1, data);
+	MSM_HS_INFO("%s: Cflags 0x%x Baud %u\n", __func__, c_cflag, bps);
+
+	mutex_unlock(&msm_uport->mtx);
+
+	msm_hs_resource_unvote(msm_uport);
+}
+
+/*
+ *  Standard API, Transmitter empty check.
+ *  Returns TIOCSER_TEMT when any character in the transmit shift
+ *  register has been sent, 0 otherwise (after a short grace sleep).
+ */
+unsigned int msm_hs_tx_empty(struct uart_port *uport)
+{
+	unsigned int data;
+	unsigned int isr;
+	unsigned int ret = 0;
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+
+	/* Vote clocks on just long enough to sample SR/ISR */
+	msm_hs_resource_vote(msm_uport);
+	data = msm_hs_read(uport, UART_DM_SR);
+	isr = msm_hs_read(uport, UART_DM_ISR);
+	msm_hs_resource_unvote(msm_uport);
+	MSM_HS_INFO("%s(): SR:0x%x ISR:0x%x ", __func__, data, isr);
+
+	if (data & UARTDM_SR_TXEMT_BMSK) {
+		ret = TIOCSER_TEMT;
+	} else
+		/*
+		 * Add an extra sleep here because sometimes the framework's
+		 * delay (based on baud rate) isn't good enough.
+		 * Note that this won't happen during every port close, only
+		 * on select occasions when the userspace does back to back
+		 * write() and close().
+		 */
+		usleep_range(5000, 7000);
+
+	return ret;
+}
+EXPORT_SYMBOL(msm_hs_tx_empty);
+
+/*
+ *  Standard API, Stop transmitter.
+ *  Characters already in the transmit shift register, and the DMA
+ *  transfer currently in flight, still drain; we only flag the flush
+ *  state so no further chunk gets submitted.
+ */
+static void msm_hs_stop_tx_locked(struct uart_port *uport)
+{
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+
+	msm_uport->tx.flush = FLUSH_STOP;
+}
+
+/*
+ * disconnect_rx_endpoint() - tear down the SPS/BAM Rx pipe.
+ * Clears all descriptor bookkeeping flags, marks rx flush state
+ * FLUSH_SHUTDOWN, and wakes any waiters on the disconnect/rx queues.
+ * Returns the sps_rx_disconnect() result (0 on success).
+ */
+static int disconnect_rx_endpoint(struct msm_hs_port *msm_uport)
+{
+	struct msm_hs_rx *rx = &msm_uport->rx;
+	struct sps_pipe *sps_pipe_handle = rx->prod.pipe_handle;
+	int ret = 0;
+
+	ret = sps_rx_disconnect(sps_pipe_handle);
+
+	/* Pending descriptors are dropped here; warn so it is visible */
+	if (msm_uport->rx.pending_flag)
+		MSM_HS_WARN("%s(): Buffers may be pending 0x%lx",
+		__func__, msm_uport->rx.pending_flag);
+	MSM_HS_DBG("%s(): clearing desc usage flag", __func__);
+	msm_uport->rx.queued_flag = 0;
+	msm_uport->rx.pending_flag = 0;
+	msm_uport->rx.rx_inx = 0;
+
+	if (ret)
+		MSM_HS_ERR("%s(): sps_disconnect failed\n", __func__);
+	/* Flush state advances to SHUTDOWN even if disconnect failed */
+	msm_uport->rx.flush = FLUSH_SHUTDOWN;
+	MSM_HS_DBG("%s: Calling Completion\n", __func__);
+	wake_up(&msm_uport->bam_disconnect_wait);
+	MSM_HS_DBG("%s: Done Completion\n", __func__);
+	wake_up(&msm_uport->rx.wait);
+	return ret;
+}
+
+/*
+ * sps_tx_disconnect() - tear down the SPS/BAM Tx pipe.
+ *
+ * No-op if the Tx flush state is already FLUSH_SHUTDOWN.  On a
+ * successful sps_disconnect() the flush state is advanced to
+ * FLUSH_SHUTDOWN under the port lock so readers see a consistent value.
+ *
+ * Returns 0 on success or the sps_disconnect() error code.
+ */
+static int sps_tx_disconnect(struct msm_hs_port *msm_uport)
+{
+	struct uart_port *uport = &msm_uport->uport;
+	struct msm_hs_tx *tx = &msm_uport->tx;
+	struct sps_pipe *tx_pipe = tx->cons.pipe_handle;
+	unsigned long flags;
+	int ret = 0;
+
+	if (msm_uport->tx.flush == FLUSH_SHUTDOWN) {
+		/* fixed typo in debug message ("disonnected") */
+		MSM_HS_DBG("%s(): pipe already disconnected", __func__);
+		return ret;
+	}
+
+	ret = sps_disconnect(tx_pipe);
+
+	if (ret) {
+		MSM_HS_ERR("%s(): sps_disconnect failed %d", __func__, ret);
+		return ret;
+	}
+
+	spin_lock_irqsave(&uport->lock, flags);
+	msm_uport->tx.flush = FLUSH_SHUTDOWN;
+	spin_unlock_irqrestore(&uport->lock, flags);
+
+	MSM_HS_DBG("%s(): TX Disconnect", __func__);
+	return ret;
+}
+
+/*
+ * Clear the Rx BAM enable bit in DMEN so the UARTDM Rx DMA interface
+ * stops accepting new transfers.
+ */
+static void msm_hs_disable_rx(struct uart_port *uport)
+{
+	unsigned int dmen;
+
+	dmen = msm_hs_read(uport, UART_DM_DMEN);
+	dmen &= ~UARTDM_RX_BAM_ENABLE_BMSK;
+	msm_hs_write(uport, UART_DM_DMEN, dmen);
+}
+
+/*
+ *  Standard API, Stop receiver as soon as possible.
+ *
+ *  Function immediately terminates the operation of the
+ *  channel receiver and any incoming characters are lost. None
+ *  of the receiver status bits are affected by this command and
+ *  characters that are already in the receive FIFO there.
+ */
+static void msm_hs_stop_rx_locked(struct uart_port *uport)
+{
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+
+	/* Touch HW only when the clocks are actually on */
+	if (msm_uport->pm_state != MSM_HS_PM_ACTIVE)
+		MSM_HS_WARN("%s(): Clocks are off\n", __func__);
+	else
+		msm_hs_disable_rx(uport);
+
+	/* Only advance NONE -> STOP; never regress a later flush state */
+	if (msm_uport->rx.flush == FLUSH_NONE)
+		msm_uport->rx.flush = FLUSH_STOP;
+}
+
+/*
+ * msm_hs_disconnect_rx() - stop the Rx DMA interface and disconnect
+ * the BAM Rx pipe.  Leaves rx.flush at FLUSH_SHUTDOWN (set inside
+ * disconnect_rx_endpoint()).
+ */
+static void msm_hs_disconnect_rx(struct uart_port *uport)
+{
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+
+	msm_hs_disable_rx(uport);
+	/* Disconnect the BAM RX pipe */
+	if (msm_uport->rx.flush == FLUSH_NONE)
+		msm_uport->rx.flush = FLUSH_STOP;
+	disconnect_rx_endpoint(msm_uport);
+	MSM_HS_DBG("%s(): rx->flush %d", __func__, msm_uport->rx.flush);
+}
+
+/*
+ * Tx timeout callback function (timer armed in msm_hs_submit_tx_locked).
+ * Fires if a queued Tx transfer did not complete within ~1s; dumps
+ * register state for debugging.  Diagnostic only — does not retry.
+ */
+void tx_timeout_handler(unsigned long arg)
+{
+	struct msm_hs_port *msm_uport = (struct msm_hs_port *) arg;
+	struct uart_port *uport = &msm_uport->uport;
+	int isr;
+
+	/* Cannot read registers with clocks off */
+	if (msm_uport->pm_state != MSM_HS_PM_ACTIVE) {
+		MSM_HS_WARN("%s(): clocks are off", __func__);
+		return;
+	}
+
+	isr = msm_hs_read(uport, UART_DM_ISR);
+	if (UARTDM_ISR_CURRENT_CTS_BMSK & isr)
+		MSM_HS_WARN("%s(): CTS Disabled, ISR 0x%x", __func__, isr);
+	dump_uart_hs_registers(msm_uport);
+}
+
+/*
+ * Transmit the next chunk of data from the circular xmit buffer.
+ * Caller holds uport->lock.  Queues at most one SPS transfer covering
+ * the contiguous span up to the buffer wrap point, and arms a 1s
+ * watchdog timer (tx_timeout_handler).
+ */
+static void msm_hs_submit_tx_locked(struct uart_port *uport)
+{
+	int left;
+	int tx_count;
+	int aligned_tx_count;
+	dma_addr_t src_addr;
+	dma_addr_t aligned_src_addr;
+	u32 flags = SPS_IOVEC_FLAG_EOT | SPS_IOVEC_FLAG_INT;
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+	struct msm_hs_tx *tx = &msm_uport->tx;
+	struct circ_buf *tx_buf = &msm_uport->uport.state->xmit;
+	struct sps_pipe *sps_pipe_handle;
+	int ret;
+
+	/* Nothing to send (or tty stopped): release the in-flight slot */
+	if (uart_circ_empty(tx_buf) || uport->state->port.tty->stopped) {
+		tx->dma_in_flight = false;
+		msm_hs_stop_tx_locked(uport);
+		return;
+	}
+
+	/* Clamp to the DMA buffer size and to the span before the wrap */
+	tx_count = uart_circ_chars_pending(tx_buf);
+
+	if (tx_count > UARTDM_TX_BUF_SIZE)
+		tx_count = UARTDM_TX_BUF_SIZE;
+
+	left = UART_XMIT_SIZE - tx_buf->tail;
+
+	if (tx_count > left)
+		tx_count = left;
+
+	src_addr = tx->dma_base + tx_buf->tail;
+	/* Mask the src_addr to align on a cache
+	 * and add those bytes to tx_count
+	 */
+	aligned_src_addr = src_addr & ~(dma_get_cache_alignment() - 1);
+	aligned_tx_count = tx_count + src_addr - aligned_src_addr;
+
+	/* Flush CPU caches for the (cache-line aligned) source range */
+	dma_sync_single_for_device(uport->dev, aligned_src_addr,
+			aligned_tx_count, DMA_TO_DEVICE);
+
+	tx->tx_count = tx_count;
+
+	hex_dump_ipc(msm_uport, tx->ipc_tx_ctxt, "Tx",
+			&tx_buf->buf[tx_buf->tail], (u64)src_addr, tx_count);
+	sps_pipe_handle = tx->cons.pipe_handle;
+
+	/* Set 1 second timeout */
+	mod_timer(&tx->tx_timeout_timer,
+		jiffies + msecs_to_jiffies(MSEC_PER_SEC));
+	/* Queue transfer request to SPS */
+	ret = sps_transfer_one(sps_pipe_handle, src_addr, tx_count,
+				msm_uport, flags);
+
+	/* NOTE(review): a sps_transfer_one() failure is only logged here;
+	 * dma_in_flight stays set until the timeout/callback path runs.
+	 */
+	MSM_HS_DBG("%s:Enqueue Tx Cmd, ret %d\n", __func__, ret);
+}
+
+/*
+ * Queue rx descriptor @inx for BAM transfer.  Each descriptor owns a
+ * UARTDM_RX_BUF_SIZE slice of the rx buffer; its iovec size is zeroed
+ * before submission so stale completion sizes are never read.
+ */
+static void msm_hs_post_rx_desc(struct msm_hs_port *msm_uport, int inx)
+{
+	u32 flags = SPS_IOVEC_FLAG_INT;
+	struct msm_hs_rx *rx = &msm_uport->rx;
+	int ret;
+
+	phys_addr_t rbuff_addr = rx->rbuffer + (UARTDM_RX_BUF_SIZE * inx);
+	u8 *virt_addr = rx->buffer + (UARTDM_RX_BUF_SIZE * inx);
+
+	MSM_HS_DBG("%s: %d:Queue desc %d, 0x%llx, base 0x%llx virtaddr %p",
+		__func__, msm_uport->uport.line, inx,
+		(u64)rbuff_addr, (u64)rx->rbuffer, virt_addr);
+
+	rx->iovec[inx].size = 0;
+	ret = sps_transfer_one(rx->prod.pipe_handle, rbuff_addr,
+		UARTDM_RX_BUF_SIZE, msm_uport, flags);
+
+	if (ret)
+		MSM_HS_ERR("Error processing descriptor %d", ret);
+}
+
+/*
+ * Advance rx_inx to @inx (mod UART_DMA_DESC_NR), the next descriptor
+ * to be processed — but only if the previous descriptor is no longer
+ * pending, so in-order processing is preserved.
+ */
+static void msm_hs_mark_next(struct msm_hs_port *msm_uport, int inx)
+{
+	struct msm_hs_rx *rx = &msm_uport->rx;
+	int prev;
+
+	inx %= UART_DMA_DESC_NR;
+	MSM_HS_DBG("%s(): inx %d, pending 0x%lx", __func__, inx,
+		rx->pending_flag);
+
+	/* prev wraps around the descriptor ring */
+	if (!inx)
+		prev = UART_DMA_DESC_NR - 1;
+	else
+		prev = inx - 1;
+
+	if (!test_bit(prev, &rx->pending_flag))
+		msm_uport->rx.rx_inx = inx;
+	MSM_HS_DBG("%s(): prev %d pending flag 0x%lx, next %d", __func__,
+		prev, rx->pending_flag, msm_uport->rx.rx_inx);
+}
+
+/*
+ *	Queue the rx descriptor that has just been processed or
+ *	all of them if queueing for the first time.
+ *
+ *	First-time detection: both queued_flag and pending_flag empty.
+ */
+static void msm_hs_queue_rx_desc(struct msm_hs_port *msm_uport)
+{
+	struct msm_hs_rx *rx = &msm_uport->rx;
+	int i, flag = 0;
+
+	/* At first, queue all, if not, queue only one */
+	if (rx->queued_flag || rx->pending_flag) {
+		/* Re-queue only the current descriptor, and only if it is
+		 * neither awaiting processing nor already with the BAM.
+		 */
+		if (!test_bit(rx->rx_inx, &rx->queued_flag) &&
+		    !test_bit(rx->rx_inx, &rx->pending_flag)) {
+			msm_hs_post_rx_desc(msm_uport, rx->rx_inx);
+			set_bit(rx->rx_inx, &rx->queued_flag);
+			MSM_HS_DBG("%s(): Set Queued Bit %d",
+				__func__, rx->rx_inx);
+		} else
+			MSM_HS_ERR("%s(): rx_inx pending or queued", __func__);
+		return;
+	}
+
+	/* Cold start: hand every free descriptor to the BAM */
+	for (i = 0; i < UART_DMA_DESC_NR; i++) {
+		if (!test_bit(i, &rx->queued_flag) &&
+		!test_bit(i, &rx->pending_flag)) {
+			MSM_HS_DBG("%s(): Calling post rx %d", __func__, i);
+			msm_hs_post_rx_desc(msm_uport, i);
+			set_bit(i, &rx->queued_flag);
+			flag = 1;
+		}
+	}
+
+	if (!flag)
+		MSM_HS_ERR("%s(): error queueing descriptor", __func__);
+}
+
+/*
+ * Start to receive the next chunk of data.  Caller holds uport->lock.
+ * Re-enables the Rx BAM interface (disabled by set_termios), programs
+ * stale-event handling, queues descriptors and issues the BAM start
+ * command.  Skipped entirely when clocks are off or descriptors are
+ * still pending (flush set to FLUSH_IGNORE in that case).
+ */
+static void msm_hs_start_rx_locked(struct uart_port *uport)
+{
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+	struct msm_hs_rx *rx = &msm_uport->rx;
+	unsigned int buffer_pending = msm_uport->rx.buffer_pending;
+	unsigned int data;
+
+	if (msm_uport->pm_state != MSM_HS_PM_ACTIVE) {
+		MSM_HS_WARN("%s(): Clocks are off\n", __func__);
+		return;
+	}
+	if (rx->pending_flag) {
+		MSM_HS_INFO("%s: Rx Cmd got executed, wait for rx_tlet\n",
+								 __func__);
+		rx->flush = FLUSH_IGNORE;
+		return;
+	}
+	if (buffer_pending)
+		MSM_HS_ERR("Error: rx started in buffer state =%x",
+			buffer_pending);
+
+	msm_hs_write(uport, UART_DM_CR, RESET_STALE_INT);
+	msm_hs_write(uport, UART_DM_DMRX, UARTDM_RX_BUF_SIZE);
+	msm_hs_write(uport, UART_DM_CR, STALE_EVENT_ENABLE);
+	/*
+	 * Enable UARTDM Rx Interface as previously it has been
+	 * disable in set_termios before configuring baud rate.
+	 */
+	data = msm_hs_read(uport, UART_DM_DMEN);
+	/* Enable UARTDM Rx BAM Interface */
+	data |= UARTDM_RX_BAM_ENABLE_BMSK;
+
+	msm_hs_write(uport, UART_DM_DMEN, data);
+	msm_hs_write(uport, UART_DM_IMR, msm_uport->imr_reg);
+	/* Calling next DMOV API. Hence mb() here. */
+	mb();
+
+	/*
+	 * RX-transfer will be automatically re-activated
+	 * after last data of previous transfer was read.
+	 */
+	data = (RX_STALE_AUTO_RE_EN | RX_TRANS_AUTO_RE_ACTIVATE |
+				RX_DMRX_CYCLIC_EN);
+	msm_hs_write(uport, UART_DM_RX_TRANS_CTRL, data);
+	/* Issue RX BAM Start IFC command */
+	msm_hs_write(uport, UART_DM_CR, START_RX_BAM_IFC);
+	/* Ensure register IO completion */
+	mb();
+
+	msm_uport->rx.flush = FLUSH_NONE;
+	/* rx_bam_inprogress gates waiters while descriptors are queued */
+	msm_uport->rx_bam_inprogress = true;
+	msm_hs_queue_rx_desc(msm_uport);
+	msm_uport->rx_bam_inprogress = false;
+	wake_up(&msm_uport->rx.wait);
+	MSM_HS_DBG("%s:Enqueue Rx Cmd\n", __func__);
+}
+
+/*
+ * flip_insert_work() - delayed-work retry path for data/flags that
+ * could not be pushed into the tty flip buffer earlier (tty buffer was
+ * exhausted).  Re-attempts the inserts encoded in rx.buffer_pending and
+ * restarts Rx once everything has drained.
+ */
+static void flip_insert_work(struct work_struct *work)
+{
+	unsigned long flags;
+	int retval;
+	struct msm_hs_port *msm_uport =
+		container_of(work, struct msm_hs_port,
+			     rx.flip_insert_work.work);
+	struct tty_struct *tty = msm_uport->uport.state->port.tty;
+
+	spin_lock_irqsave(&msm_uport->uport.lock, flags);
+	if (!tty || msm_uport->rx.flush == FLUSH_SHUTDOWN) {
+		dev_err(msm_uport->uport.dev,
+			"%s:Invalid driver state flush %d\n",
+				__func__, msm_uport->rx.flush);
+		MSM_HS_ERR("%s:Invalid driver state flush %d\n",
+				__func__, msm_uport->rx.flush);
+		spin_unlock_irqrestore(&msm_uport->uport.lock, flags);
+		return;
+	}
+
+	if (msm_uport->rx.buffer_pending == NONE_PENDING) {
+		MSM_HS_ERR("Error: No buffer pending in %s", __func__);
+		spin_unlock_irqrestore(&msm_uport->uport.lock, flags);
+		return;
+	}
+	if (msm_uport->rx.buffer_pending & FIFO_OVERRUN) {
+		retval = tty_insert_flip_char(tty->port, 0, TTY_OVERRUN);
+		if (retval)
+			msm_uport->rx.buffer_pending &= ~FIFO_OVERRUN;
+	}
+	if (msm_uport->rx.buffer_pending & PARITY_ERROR) {
+		retval = tty_insert_flip_char(tty->port, 0, TTY_PARITY);
+		if (retval)
+			msm_uport->rx.buffer_pending &= ~PARITY_ERROR;
+	}
+	if (msm_uport->rx.buffer_pending & CHARS_NORMAL) {
+		int rx_count, rx_offset;
+
+		/*
+		 * NOTE(review): offset decode uses mask 0xFFD0 >> 5 here,
+		 * while the encoders use `retval << 8` below and
+		 * `retval << 5` in msm_serial_hs_rx_work — these do not
+		 * look mutually consistent; verify the intended packing.
+		 */
+		rx_count = (msm_uport->rx.buffer_pending & 0xFFFF0000) >> 16;
+		rx_offset = (msm_uport->rx.buffer_pending & 0xFFD0) >> 5;
+		retval = tty_insert_flip_string(tty->port,
+			msm_uport->rx.buffer +
+			(msm_uport->rx.rx_inx * UARTDM_RX_BUF_SIZE)
+			+ rx_offset, rx_count);
+		msm_uport->rx.buffer_pending &= (FIFO_OVERRUN |
+						 PARITY_ERROR);
+		if (retval != rx_count)
+			msm_uport->rx.buffer_pending |= CHARS_NORMAL |
+				retval << 8 | (rx_count - retval) << 16;
+	}
+	if (msm_uport->rx.buffer_pending) {
+		/* Still blocked: retry again later */
+		schedule_delayed_work(&msm_uport->rx.flip_insert_work,
+				      msecs_to_jiffies(RETRY_TIMEOUT));
+	} else if (msm_uport->rx.flush <= FLUSH_IGNORE) {
+		MSM_HS_WARN("Pending buffers cleared, restarting");
+		clear_bit(msm_uport->rx.rx_inx,
+			&msm_uport->rx.pending_flag);
+		msm_hs_start_rx_locked(&msm_uport->uport);
+		msm_hs_mark_next(msm_uport, msm_uport->rx.rx_inx+1);
+	}
+	spin_unlock_irqrestore(&msm_uport->uport.lock, flags);
+	tty_flip_buffer_push(tty->port);
+}
+
+/*
+ * msm_serial_hs_rx_work() - Rx kthread worker.
+ * Drains every pending rx descriptor (or kick-starts queueing when
+ * nothing is queued, e.g. after set_termios): checks UARTDM status for
+ * overrun/parity/break, pushes received bytes into the tty flip buffer,
+ * then re-queues the descriptor with the BAM.  If the tty buffer fills,
+ * progress state is parked in rx.buffer_pending and flip_insert_work is
+ * scheduled to retry.
+ */
+static void msm_serial_hs_rx_work(struct kthread_work *work)
+{
+	int retval;
+	int rx_count = 0;
+	unsigned long status;
+	unsigned long flags;
+	unsigned int error_f = 0;
+	struct uart_port *uport;
+	struct msm_hs_port *msm_uport;
+	unsigned int flush = FLUSH_DATA_INVALID;
+	struct tty_struct *tty;
+	struct sps_event_notify *notify;
+	struct msm_hs_rx *rx;
+	struct sps_pipe *sps_pipe_handle;
+	struct platform_device *pdev;
+	const struct msm_serial_hs_platform_data *pdata;
+
+	msm_uport = container_of((struct kthread_work *) work,
+				 struct msm_hs_port, rx.kwork);
+	msm_hs_resource_vote(msm_uport);
+	uport = &msm_uport->uport;
+	tty = uport->state->port.tty;
+	notify = &msm_uport->notify;
+	rx = &msm_uport->rx;
+	pdev = to_platform_device(uport->dev);
+	/* NOTE(review): pdata is fetched but unused in this function */
+	pdata = pdev->dev.platform_data;
+
+	spin_lock_irqsave(&uport->lock, flags);
+
+	if (!tty || rx->flush == FLUSH_SHUTDOWN) {
+		dev_err(uport->dev, "%s:Invalid driver state flush %d\n",
+				__func__, rx->flush);
+		MSM_HS_ERR("%s:Invalid driver state flush %d\n",
+				__func__, rx->flush);
+		spin_unlock_irqrestore(&uport->lock, flags);
+		msm_hs_resource_unvote(msm_uport);
+		return;
+	}
+
+	/*
+	 * Process all pending descs or if nothing is
+	 * queued - called from termios
+	 */
+	while (!rx->buffer_pending &&
+		(rx->pending_flag || !rx->queued_flag)) {
+		MSM_HS_DBG("%s(): Loop P 0x%lx Q 0x%lx", __func__,
+			rx->pending_flag, rx->queued_flag);
+
+		status = msm_hs_read(uport, UART_DM_SR);
+
+		MSM_HS_DBG("In %s\n", __func__);
+
+		/* overflow is not connect to data in a FIFO */
+		if (unlikely((status & UARTDM_SR_OVERRUN_BMSK) &&
+			     (uport->read_status_mask & CREAD))) {
+			retval = tty_insert_flip_char(tty->port,
+							0, TTY_OVERRUN);
+			MSM_HS_WARN("%s(): RX Buffer Overrun Detected\n",
+				__func__);
+			/*
+			 * NOTE(review): ORs the tty flag TTY_OVERRUN into
+			 * buffer_pending, whereas flip_insert_work checks
+			 * the driver's FIFO_OVERRUN bit — confirm these
+			 * constants line up (same for TTY_PARITY/TTY_BREAK
+			 * below).
+			 */
+			if (!retval)
+				msm_uport->rx.buffer_pending |= TTY_OVERRUN;
+			uport->icount.buf_overrun++;
+			error_f = 1;
+		}
+
+		if (!(uport->ignore_status_mask & INPCK))
+			status = status & ~(UARTDM_SR_PAR_FRAME_BMSK);
+
+		if (unlikely(status & UARTDM_SR_PAR_FRAME_BMSK)) {
+			/* Can not tell diff between parity & frame error */
+			MSM_HS_WARN("msm_serial_hs: parity error\n");
+			uport->icount.parity++;
+			error_f = 1;
+			if (!(uport->ignore_status_mask & IGNPAR)) {
+				retval = tty_insert_flip_char(tty->port,
+							0, TTY_PARITY);
+				if (!retval)
+					msm_uport->rx.buffer_pending
+								|= TTY_PARITY;
+			}
+		}
+
+		if (unlikely(status & UARTDM_SR_RX_BREAK_BMSK)) {
+			MSM_HS_DBG("msm_serial_hs: Rx break\n");
+			uport->icount.brk++;
+			error_f = 1;
+			if (!(uport->ignore_status_mask & IGNBRK)) {
+				retval = tty_insert_flip_char(tty->port,
+								0, TTY_BREAK);
+				if (!retval)
+					msm_uport->rx.buffer_pending
+								|= TTY_BREAK;
+			}
+		}
+
+		if (error_f)
+			msm_hs_write(uport, UART_DM_CR,	RESET_ERROR_STATUS);
+		flush = msm_uport->rx.flush;
+		if (flush == FLUSH_IGNORE)
+			if (!msm_uport->rx.buffer_pending) {
+				MSM_HS_DBG("%s: calling start_rx_locked\n",
+					__func__);
+				msm_hs_start_rx_locked(uport);
+			}
+		if (flush >= FLUSH_DATA_INVALID)
+			goto out;
+
+		rx_count = msm_uport->rx.iovec[msm_uport->rx.rx_inx].size;
+		hex_dump_ipc(msm_uport, rx->ipc_rx_ctxt, "Rx",
+			(msm_uport->rx.buffer +
+			(msm_uport->rx.rx_inx * UARTDM_RX_BUF_SIZE)),
+			msm_uport->rx.iovec[msm_uport->rx.rx_inx].addr,
+			rx_count);
+
+		 /*
+		  * We are in a spin locked context, spin lock taken at
+		  * other places where these flags are updated
+		  */
+		if (0 != (uport->read_status_mask & CREAD)) {
+			if (!test_bit(msm_uport->rx.rx_inx,
+				&msm_uport->rx.pending_flag) &&
+			    !test_bit(msm_uport->rx.rx_inx,
+				&msm_uport->rx.queued_flag))
+				MSM_HS_ERR("%s: RX INX not set", __func__);
+			else if (test_bit(msm_uport->rx.rx_inx,
+					&msm_uport->rx.pending_flag) &&
+				!test_bit(msm_uport->rx.rx_inx,
+					&msm_uport->rx.queued_flag)) {
+				MSM_HS_DBG("%s(): Clear Pending Bit %d",
+					__func__, msm_uport->rx.rx_inx);
+
+				retval = tty_insert_flip_string(tty->port,
+					msm_uport->rx.buffer +
+					(msm_uport->rx.rx_inx *
+					UARTDM_RX_BUF_SIZE),
+					rx_count);
+
+				if (retval != rx_count) {
+					/* Partial insert: stash progress */
+					MSM_HS_INFO("%s(): ret %d rx_count %d",
+						__func__, retval, rx_count);
+					msm_uport->rx.buffer_pending |=
+					CHARS_NORMAL | retval << 5 |
+					(rx_count - retval) << 16;
+				}
+			} else
+				MSM_HS_ERR("%s: Error in inx %d", __func__,
+					msm_uport->rx.rx_inx);
+		}
+
+		if (!msm_uport->rx.buffer_pending) {
+			/* Descriptor fully consumed: recycle it */
+			msm_uport->rx.flush = FLUSH_NONE;
+			msm_uport->rx_bam_inprogress = true;
+			sps_pipe_handle = rx->prod.pipe_handle;
+			MSM_HS_DBG("Queing bam descriptor\n");
+			/* Queue transfer request to SPS */
+			clear_bit(msm_uport->rx.rx_inx,
+				&msm_uport->rx.pending_flag);
+			msm_hs_queue_rx_desc(msm_uport);
+			msm_hs_mark_next(msm_uport, msm_uport->rx.rx_inx+1);
+			msm_hs_write(uport, UART_DM_CR, START_RX_BAM_IFC);
+			msm_uport->rx_bam_inprogress = false;
+			wake_up(&msm_uport->rx.wait);
+		} else
+			break;
+
+	}
+out:
+	if (msm_uport->rx.buffer_pending) {
+		MSM_HS_WARN("%s: tty buffer exhausted. Stalling\n", __func__);
+		schedule_delayed_work(&msm_uport->rx.flip_insert_work
+				      , msecs_to_jiffies(RETRY_TIMEOUT));
+	}
+	/* tty_flip_buffer_push() might call msm_hs_start(), so unlock */
+	spin_unlock_irqrestore(&uport->lock, flags);
+	if (flush < FLUSH_DATA_INVALID)
+		tty_flip_buffer_push(tty->port);
+	msm_hs_resource_unvote(msm_uport);
+}
+
+/*
+ * msm_hs_start_tx_locked() - standard uart_ops start_tx hook.
+ * Claims the Tx in-flight slot and queues the Tx kthread work, unless a
+ * transfer is already active or no stop/flush has been requested yet.
+ * Must be called with uport->lock held.
+ */
+static void msm_hs_start_tx_locked(struct uart_port *uport)
+{
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+	struct msm_hs_tx *tx = &msm_uport->tx;
+
+	/* Bail if transfer in progress */
+	if (tx->flush < FLUSH_STOP || tx->dma_in_flight) {
+		MSM_HS_INFO("%s(): retry, flush %d, dma_in_flight %d\n",
+			__func__, tx->flush, tx->dma_in_flight);
+		return;
+	}
+
+	/*
+	 * The guard above guarantees dma_in_flight is false here, so the
+	 * former re-check of !dma_in_flight was dead code; claim the slot
+	 * and queue the work unconditionally.
+	 */
+	tx->dma_in_flight = true;
+	kthread_queue_work(&msm_uport->tx.kworker, &msm_uport->tx.kwork);
+}
+
+/**
+ * Callback notification from the SPS driver: a queued Tx transfer has
+ * completed.  Runs in SPS callback context; snapshots the notify event,
+ * cancels the Tx watchdog timer and defers completion handling to the
+ * Tx kthread worker (msm_serial_hs_tx_work).
+ */
+
+static void msm_hs_sps_tx_callback(struct sps_event_notify *notify)
+{
+	struct msm_hs_port *msm_uport =
+		(struct msm_hs_port *)
+		((struct sps_event_notify *)notify)->user;
+	/* Reassemble the full physical address from flags + addr fields */
+	phys_addr_t addr = DESC_FULL_ADDR(notify->data.transfer.iovec.flags,
+		notify->data.transfer.iovec.addr);
+
+	msm_uport->notify = *notify;
+	MSM_HS_INFO("tx_cb: addr=0x%pa, size=0x%x, flags=0x%x\n",
+		&addr, notify->data.transfer.iovec.size,
+		notify->data.transfer.iovec.flags);
+
+	del_timer(&msm_uport->tx.tx_timeout_timer);
+	MSM_HS_DBG("%s(): Queue kthread work", __func__);
+	kthread_queue_work(&msm_uport->tx.kworker, &msm_uport->tx.kwork);
+}
+
+/*
+ * msm_serial_hs_tx_work() - Tx kthread worker.
+ * Two entry cases: if a stop/flush was requested (flush >= FLUSH_STOP)
+ * it clears the flush state and (re)submits; otherwise a transfer just
+ * completed, so it advances the circ-buffer tail, accounts the sent
+ * bytes and submits the next chunk.
+ */
+static void msm_serial_hs_tx_work(struct kthread_work *work)
+{
+	unsigned long flags;
+	struct msm_hs_port *msm_uport =
+			container_of((struct kthread_work *)work,
+			struct msm_hs_port, tx.kwork);
+	struct uart_port *uport = &msm_uport->uport;
+	struct circ_buf *tx_buf = &uport->state->xmit;
+	struct msm_hs_tx *tx = &msm_uport->tx;
+
+	/*
+	 * Do the work buffer related work in BAM
+	 * mode that is equivalent to legacy mode
+	 */
+	msm_hs_resource_vote(msm_uport);
+	if (tx->flush >= FLUSH_STOP) {
+		spin_lock_irqsave(&(msm_uport->uport.lock), flags);
+		tx->flush = FLUSH_NONE;
+		MSM_HS_DBG("%s(): calling submit_tx", __func__);
+		msm_hs_submit_tx_locked(uport);
+		spin_unlock_irqrestore(&(msm_uport->uport.lock), flags);
+		msm_hs_resource_unvote(msm_uport);
+		return;
+	}
+
+	spin_lock_irqsave(&(msm_uport->uport.lock), flags);
+	/*
+	 * NOTE(review): tail advance uses `& ~UART_XMIT_SIZE` rather than
+	 * the usual `& (UART_XMIT_SIZE - 1)`; it clears only the size bit,
+	 * which wraps correctly while tail + tx_count < 2 * UART_XMIT_SIZE
+	 * (guaranteed by the clamping in msm_hs_submit_tx_locked).
+	 */
+	if (!uart_circ_empty(tx_buf))
+		tx_buf->tail = (tx_buf->tail +
+		tx->tx_count) & ~UART_XMIT_SIZE;
+	else
+		MSM_HS_DBG("%s:circ buffer is empty\n", __func__);
+
+	wake_up(&msm_uport->tx.wait);
+
+	uport->icount.tx += tx->tx_count;
+
+	/*
+	 * Calling to send next chunk of data
+	 * If the circ buffer is empty, we stop
+	 * If the clock off was requested, the clock
+	 * off sequence is kicked off
+	 */
+	 MSM_HS_DBG("%s(): calling submit_tx", __func__);
+	 msm_hs_submit_tx_locked(uport);
+
+	if (uart_circ_chars_pending(tx_buf) < WAKEUP_CHARS)
+		uart_write_wakeup(uport);
+
+	spin_unlock_irqrestore(&(msm_uport->uport.lock), flags);
+	msm_hs_resource_unvote(msm_uport);
+}
+
+/*
+ * Mark the rx descriptor identified by the completed iovec as pending
+ * (awaiting processing by the Rx worker) and no longer queued, and
+ * save its iovec for size/addr lookup.
+ */
+static void
+msm_hs_mark_proc_rx_desc(struct msm_hs_port *msm_uport,
+			struct sps_event_notify *notify)
+{
+	struct msm_hs_rx *rx = &msm_uport->rx;
+	phys_addr_t addr = DESC_FULL_ADDR(notify->data.transfer.iovec.flags,
+		notify->data.transfer.iovec.addr);
+	/* divide by UARTDM_RX_BUF_SIZE -- NOTE(review): `>> 9` hard-codes
+	 * a 512-byte buffer size; confirm UARTDM_RX_BUF_SIZE == 512.
+	 */
+	int inx = (addr - rx->rbuffer) >> 9;
+
+	set_bit(inx, &rx->pending_flag);
+	clear_bit(inx, &rx->queued_flag);
+	rx->iovec[inx] = notify->data.transfer.iovec;
+	MSM_HS_DBG("Clear Q, Set P Bit %d, Q 0x%lx P 0x%lx",
+		inx, rx->queued_flag, rx->pending_flag);
+}
+
+/**
+ * Callback notification from the SPS driver: a queued Rx transfer has
+ * completed.  Marks the corresponding descriptor pending under the port
+ * lock and, while the port is active (FLUSH_NONE), hands processing off
+ * to the Rx kthread worker.
+ */
+
+static void msm_hs_sps_rx_callback(struct sps_event_notify *notify)
+{
+
+	struct msm_hs_port *msm_uport =
+		(struct msm_hs_port *)
+		((struct sps_event_notify *)notify)->user;
+	struct uart_port *uport;
+	unsigned long flags;
+	struct msm_hs_rx *rx = &msm_uport->rx;
+	phys_addr_t addr = DESC_FULL_ADDR(notify->data.transfer.iovec.flags,
+		notify->data.transfer.iovec.addr);
+	/* divide by UARTDM_RX_BUF_SIZE -- NOTE(review): `>> 9` hard-codes
+	 * a 512-byte buffer size; confirm UARTDM_RX_BUF_SIZE == 512.
+	 */
+	int inx = (addr - rx->rbuffer) >> 9;
+
+	uport = &(msm_uport->uport);
+	msm_uport->notify = *notify;
+	MSM_HS_INFO("rx_cb: addr=0x%pa, size=0x%x, flags=0x%x\n",
+		&addr, notify->data.transfer.iovec.size,
+		notify->data.transfer.iovec.flags);
+
+	spin_lock_irqsave(&uport->lock, flags);
+	msm_hs_mark_proc_rx_desc(msm_uport, notify);
+	spin_unlock_irqrestore(&uport->lock, flags);
+
+	if (msm_uport->rx.flush == FLUSH_NONE) {
+		/* Test if others are queued */
+		if (msm_uport->rx.pending_flag & ~(1 << inx)) {
+			MSM_HS_DBG("%s(): inx 0x%x, 0x%lx not processed",
+			__func__, inx,
+			msm_uport->rx.pending_flag & ~(1<<inx));
+		}
+		kthread_queue_work(&msm_uport->rx.kworker,
+				&msm_uport->rx.kwork);
+		MSM_HS_DBG("%s(): Scheduled rx_tlet", __func__);
+	}
+}
+
+/*
+ *  Standard API, current state of the modem control inputs.
+ *
+ * CTS is handled entirely in hardware, so clear-to-send is always
+ * reported and the TX FIFO is relied on to block when full.
+ * DCD and DSR are unsupported and reported high; RI reported low.
+ */
+static unsigned int msm_hs_get_mctrl_locked(struct uart_port *uport)
+{
+	unsigned int mctrl = TIOCM_DSR | TIOCM_CAR | TIOCM_CTS;
+
+	return mctrl;
+}
+
+/*
+ *  Standard API, Set or clear RFR_signal
+ *
+ * Set RFR high, (Indicate we are not ready for data), we disable auto
+ * ready for receiving and then set RFR_N high. To set RFR to low we just
+ * turn back auto ready for receiving and it should lower RFR signal
+ * when hardware is ready
+ */
+void msm_hs_set_mctrl_locked(struct uart_port *uport,
+				    unsigned int mctrl)
+{
+	unsigned int set_rts;
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+
+	/* Registers cannot be touched with clocks off */
+	if (msm_uport->pm_state != MSM_HS_PM_ACTIVE) {
+		MSM_HS_WARN("%s(): Clocks are off\n", __func__);
+		return;
+	}
+	/* RTS is active low */
+	set_rts = TIOCM_RTS & mctrl ? 0 : 1;
+	MSM_HS_INFO("%s: set_rts %d\n", __func__, set_rts);
+
+	/* set_rts means de-assert: drop flow control; else re-enable it */
+	if (set_rts)
+		msm_hs_disable_flow_control(uport, false);
+	else
+		msm_hs_enable_flow_control(uport, false);
+}
+
+/*
+ * msm_hs_set_mctrl() - exported wrapper around the _locked variant:
+ * votes the clocks on and takes the port lock around the update.
+ */
+void msm_hs_set_mctrl(struct uart_port *uport,
+				    unsigned int mctrl)
+{
+	unsigned long flags;
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+
+	msm_hs_resource_vote(msm_uport);
+	spin_lock_irqsave(&uport->lock, flags);
+	msm_hs_set_mctrl_locked(uport, mctrl);
+	spin_unlock_irqrestore(&uport->lock, flags);
+	msm_hs_resource_unvote(msm_uport);
+}
+EXPORT_SYMBOL(msm_hs_set_mctrl);
+
+/* Standard API, Enable modem status (CTS) interrupt  */
+static void msm_hs_enable_ms_locked(struct uart_port *uport)
+{
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+
+	/* Registers cannot be touched with clocks off */
+	if (msm_uport->pm_state != MSM_HS_PM_ACTIVE) {
+		MSM_HS_WARN("%s(): Clocks are off\n", __func__);
+		return;
+	}
+
+	/* Enable DELTA_CTS Interrupt */
+	msm_uport->imr_reg |= UARTDM_ISR_DELTA_CTS_BMSK;
+	msm_hs_write(uport, UART_DM_IMR, msm_uport->imr_reg);
+	/* Ensure register IO completion */
+	mb();
+
+}
+
+/*
+ *  Standard API, Break Signal
+ *
+ * Control the transmission of a break signal. ctl eq 0 => break
+ * signal terminate ctl ne 0 => start break signal
+ */
+static void msm_hs_break_ctl(struct uart_port *uport, int ctl)
+{
+	unsigned long flags;
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+
+	/* Vote clocks on and serialize against other register writers */
+	msm_hs_resource_vote(msm_uport);
+	spin_lock_irqsave(&uport->lock, flags);
+	msm_hs_write(uport, UART_DM_CR, ctl ? START_BREAK : STOP_BREAK);
+	/* Ensure register IO completion */
+	mb();
+	spin_unlock_irqrestore(&uport->lock, flags);
+	msm_hs_resource_unvote(msm_uport);
+}
+
+/* Standard API, autoconfigure: claim the port type when asked to. */
+static void msm_hs_config_port(struct uart_port *uport, int cfg_flags)
+{
+	if (!(cfg_flags & UART_CONFIG_TYPE))
+		return;
+
+	uport->type = PORT_MSM;
+}
+
+/*  Handle CTS changes (Called from interrupt handler):
+ *  clears the CTS interrupt, bumps the CTS counter and wakes any
+ *  TIOCMIWAIT waiters.
+ */
+static void msm_hs_handle_delta_cts_locked(struct uart_port *uport)
+{
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+
+	msm_hs_resource_vote(msm_uport);
+	/* clear interrupt */
+	msm_hs_write(uport, UART_DM_CR, RESET_CTS);
+	/* Calling CLOCK API. Hence mb() requires here. */
+	mb();
+	uport->icount.cts++;
+
+	/* clear the IOCTL TIOCMIWAIT if called */
+	wake_up_interruptible(&uport->state->port.delta_msr_wait);
+	msm_hs_resource_unvote(msm_uport);
+}
+
+/*
+ * msm_hs_isr() - main UARTDM interrupt handler.
+ * Dispatches on the masked interrupt status (MISR): RXLEV (rx level),
+ * RXSTALE (stale rx), TX_READY (tx chunk done), TXLEV (tx FIFO empty,
+ * triggers clock-off work) and DELTA_CTS.
+ */
+static irqreturn_t msm_hs_isr(int irq, void *dev)
+{
+	unsigned long flags;
+	unsigned int isr_status;
+	struct msm_hs_port *msm_uport = (struct msm_hs_port *)dev;
+	struct uart_port *uport = &msm_uport->uport;
+	struct circ_buf *tx_buf = &uport->state->xmit;
+	struct msm_hs_tx *tx = &msm_uport->tx;
+
+	spin_lock_irqsave(&uport->lock, flags);
+
+	isr_status = msm_hs_read(uport, UART_DM_MISR);
+	MSM_HS_INFO("%s: DM_ISR: 0x%x\n", __func__, isr_status);
+	dump_uart_hs_registers(msm_uport);
+
+	/* Uart RX starting: mask further RXLEV until re-armed */
+	if (isr_status & UARTDM_ISR_RXLEV_BMSK) {
+		MSM_HS_DBG("%s:UARTDM_ISR_RXLEV_BMSK\n", __func__);
+		msm_uport->imr_reg &= ~UARTDM_ISR_RXLEV_BMSK;
+		msm_hs_write(uport, UART_DM_IMR, msm_uport->imr_reg);
+		/* Complete device write for IMR. Hence mb() requires. */
+		mb();
+	}
+	/* Stale rx interrupt */
+	if (isr_status & UARTDM_ISR_RXSTALE_BMSK) {
+		msm_hs_write(uport, UART_DM_CR, STALE_EVENT_DISABLE);
+		msm_hs_write(uport, UART_DM_CR, RESET_STALE_INT);
+		/*
+		 * Complete device write before calling DMOV API. Hence
+		 * mb() requires here.
+		 */
+		mb();
+		MSM_HS_DBG("%s:Stal Interrupt\n", __func__);
+	}
+	/* tx ready interrupt */
+	if (isr_status & UARTDM_ISR_TX_READY_BMSK) {
+		MSM_HS_DBG("%s: ISR_TX_READY Interrupt\n", __func__);
+		/* Clear  TX Ready */
+		msm_hs_write(uport, UART_DM_CR, CLEAR_TX_READY);
+
+		/*
+		 * Complete both writes before starting new TX.
+		 * Hence mb() requires here.
+		 */
+		mb();
+		/* Complete DMA TX transactions and submit new transactions */
+
+		/* Do not update tx_buf.tail if uart_flush_buffer already
+		 * called in serial core
+		 */
+		if (!uart_circ_empty(tx_buf))
+			tx_buf->tail = (tx_buf->tail +
+					tx->tx_count) & ~UART_XMIT_SIZE;
+
+		tx->dma_in_flight = false;
+
+		uport->icount.tx += tx->tx_count;
+
+		if (uart_circ_chars_pending(tx_buf) < WAKEUP_CHARS)
+			uart_write_wakeup(uport);
+	}
+	if (isr_status & UARTDM_ISR_TXLEV_BMSK) {
+		/* TX FIFO is empty */
+		msm_uport->imr_reg &= ~UARTDM_ISR_TXLEV_BMSK;
+		msm_hs_write(uport, UART_DM_IMR, msm_uport->imr_reg);
+		MSM_HS_DBG("%s: TXLEV Interrupt\n", __func__);
+		/*
+		 * Complete device write before starting clock_off request.
+		 * Hence mb() requires here.
+		 */
+		mb();
+		queue_work(msm_uport->hsuart_wq, &msm_uport->clock_off_w);
+	}
+
+	/* Change in CTS interrupt */
+	if (isr_status & UARTDM_ISR_DELTA_CTS_BMSK)
+		msm_hs_handle_delta_cts_locked(uport);
+
+	spin_unlock_irqrestore(&uport->lock, flags);
+
+	return IRQ_HANDLED;
+}
+
+/* The following two functions provide interfaces to get the underlying
+ * port structure (struct uart_port or struct msm_hs_port) given
+ * the port index. msm_hs_get_uart_port is called by clients.
+ * The function msm_hs_get_hs_port is for internal use
+ */
+
+/*
+ * msm_hs_get_uart_port() - map a port index to its uart_port.
+ * Returns NULL when the indexed state does not carry the expected line
+ * number (sanity check against a mismatched index).
+ */
+struct uart_port *msm_hs_get_uart_port(int port_index)
+{
+	struct uart_state *state = msm_hs_driver.state + port_index;
+
+	/* The uart_driver structure stores the states in an array.
+	 * Thus the corresponding offset from the drv->state returns
+	 * the state for the uart_port that is requested
+	 */
+	if (port_index == state->uart_port->line)
+		return state->uart_port;
+
+	return NULL;
+}
+EXPORT_SYMBOL(msm_hs_get_uart_port);
+
+/* Resolve a port index to its msm_hs_port, or NULL when not registered. */
+static struct msm_hs_port *msm_hs_get_hs_port(int port_index)
+{
+	struct uart_port *uport = msm_hs_get_uart_port(port_index);
+
+	return uport ? UARTDM_TO_MSM(uport) : NULL;
+}
+
+/*
+ * Arm the GPIO wakeup interrupt and hand over from the core UART irq.
+ * No-op unless low-power wakeup is configured and the wakeup irq has not
+ * been freed.  The first edge after arming is ignored (wakeup.ignore = 1)
+ * because it may be a stale transition rather than a real wakeup.
+ */
+void enable_wakeup_interrupt(struct msm_hs_port *msm_uport)
+{
+	unsigned long flags;
+	struct uart_port *uport = &(msm_uport->uport);
+
+	if (!is_use_low_power_wakeup(msm_uport))
+		return;
+	if (msm_uport->wakeup.freed)
+		return;
+
+	if (!(msm_uport->wakeup.enabled)) {
+		/* State flags are updated under the port lock ... */
+		spin_lock_irqsave(&uport->lock, flags);
+		msm_uport->wakeup.ignore = 1;
+		msm_uport->wakeup.enabled = true;
+		spin_unlock_irqrestore(&uport->lock, flags);
+		/* ... then swap irq sources: core irq off, wakeup gpio irq on */
+		disable_irq(uport->irq);
+		enable_irq(msm_uport->wakeup.irq);
+	} else {
+		MSM_HS_WARN("%s:Wake up IRQ already enabled", __func__);
+	}
+}
+
+/*
+ * Disarm the GPIO wakeup interrupt and hand control back to the core
+ * UART irq.  Mirror of enable_wakeup_interrupt(); no-op unless low-power
+ * wakeup is configured and the wakeup irq is still allocated.
+ */
+void disable_wakeup_interrupt(struct msm_hs_port *msm_uport)
+{
+	unsigned long flags;
+	struct uart_port *uport = &(msm_uport->uport);
+
+	if (!is_use_low_power_wakeup(msm_uport))
+		return;
+	if (msm_uport->wakeup.freed)
+		return;
+
+	if (msm_uport->wakeup.enabled) {
+		/* nosync: may be called from contexts that must not sleep */
+		disable_irq_nosync(msm_uport->wakeup.irq);
+		enable_irq(uport->irq);
+		spin_lock_irqsave(&uport->lock, flags);
+		msm_uport->wakeup.enabled = false;
+		spin_unlock_irqrestore(&uport->lock, flags);
+	} else {
+		MSM_HS_WARN("%s:Wake up IRQ already disabled", __func__);
+	}
+}
+
+/*
+ * Quiesce the UART before its clocks are dropped: disable flow control,
+ * tear down the RX path if it is still live, wait for in-flight TX to
+ * reach FLUSH_STOP, then disconnect the TX BAM pipe.  Flow control is
+ * restored only when no client explicitly requested clocks off
+ * (client_req_state).  The teardown order here is deliberate.
+ */
+void msm_hs_resource_off(struct msm_hs_port *msm_uport)
+{
+	struct uart_port *uport = &(msm_uport->uport);
+	unsigned int data;
+
+	MSM_HS_DBG("%s(): begin", __func__);
+	msm_hs_disable_flow_control(uport, false);
+	if (msm_uport->rx.flush == FLUSH_NONE)
+		msm_hs_disconnect_rx(uport);
+
+	/* disable dlink; timeout argument is in jiffies */
+	if (msm_uport->tx.flush == FLUSH_NONE)
+		wait_event_timeout(msm_uport->tx.wait,
+			msm_uport->tx.flush == FLUSH_STOP, 500);
+
+	if (msm_uport->tx.flush != FLUSH_SHUTDOWN) {
+		/* Clear the TX BAM enable bit before dropping the pipe */
+		data = msm_hs_read(uport, UART_DM_DMEN);
+		data &= ~UARTDM_TX_BAM_ENABLE_BMSK;
+		msm_hs_write(uport, UART_DM_DMEN, data);
+		sps_tx_disconnect(msm_uport);
+	}
+	if (!atomic_read(&msm_uport->client_req_state))
+		msm_hs_enable_flow_control(uport, false);
+}
+
+/*
+ * Re-establish the UART data paths after the clocks come back on:
+ * reset and re-enable the RX BAM interface if RX was stopped or shut
+ * down, reconnect the TX SPS pipe, and restart RX when it had been
+ * fully shut down.  Counterpart of msm_hs_resource_off().
+ */
+void msm_hs_resource_on(struct msm_hs_port *msm_uport)
+{
+	struct uart_port *uport = &(msm_uport->uport);
+	unsigned int data;
+	unsigned long flags;
+
+	if (msm_uport->rx.flush == FLUSH_SHUTDOWN ||
+	msm_uport->rx.flush == FLUSH_STOP) {
+		msm_hs_write(uport, UART_DM_CR, RESET_RX);
+		data = msm_hs_read(uport, UART_DM_DMEN);
+		data |= UARTDM_RX_BAM_ENABLE_BMSK;
+		msm_hs_write(uport, UART_DM_DMEN, data);
+	}
+
+	msm_hs_spsconnect_tx(msm_uport);
+	if (msm_uport->rx.flush == FLUSH_SHUTDOWN) {
+		msm_hs_spsconnect_rx(uport);
+		/* msm_hs_start_rx_locked() expects the port lock held */
+		spin_lock_irqsave(&uport->lock, flags);
+		msm_hs_start_rx_locked(uport);
+		spin_unlock_irqrestore(&uport->lock, flags);
+	}
+}
+
+/* Request to turn off uart clock once pending TX is flushed.
+ * Rejected with -EIO during system suspend and with -EPERM when the
+ * client vote count is already zero or negative (unbalanced voting).
+ * On success drops one resource vote and decrements the client count.
+ */
+int msm_hs_request_clock_off(struct uart_port *uport)
+{
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+	int ret = 0;
+	int client_count = 0;
+
+	mutex_lock(&msm_uport->mtx);
+	/*
+	 * If we're in the middle of a system suspend, don't process these
+	 * userspace/kernel API commands.
+	 */
+	if (msm_uport->pm_state == MSM_HS_PM_SYS_SUSPENDED) {
+		MSM_HS_WARN("%s:Can't process clk request during suspend",
+			__func__);
+		ret = -EIO;
+	}
+	mutex_unlock(&msm_uport->mtx);
+	if (ret)
+		goto exit_request_clock_off;
+
+	if (atomic_read(&msm_uport->client_count) <= 0) {
+		MSM_HS_WARN("%s(): ioctl count -ve, client check voting",
+			__func__);
+		ret = -EPERM;
+		goto exit_request_clock_off;
+	}
+	/* Set the flag to disable flow control and wakeup irq */
+	if (msm_uport->obs)
+		atomic_set(&msm_uport->client_req_state, 1);
+	msm_hs_resource_unvote(msm_uport);
+	atomic_dec(&msm_uport->client_count);
+	client_count = atomic_read(&msm_uport->client_count);
+	LOG_USR_MSG(msm_uport->ipc_msm_hs_pwr_ctxt,
+			"%s: Client_Count %d\n", __func__,
+			client_count);
+exit_request_clock_off:
+	return ret;
+}
+EXPORT_SYMBOL(msm_hs_request_clock_off);
+
+/**
+ * msm_hs_request_clock_on() - client vote to keep the UART clocks on
+ * @uport: uart port whose resources are requested
+ *
+ * Rejected with -EIO while a system suspend is in progress.  On success
+ * takes a resource vote, bumps the client count, and clears the OBS
+ * client-request flag so flow-control/wakeup-irq handling resumes.
+ * Counterpart of msm_hs_request_clock_off().
+ */
+int msm_hs_request_clock_on(struct uart_port *uport)
+{
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+	int client_count;
+	int ret = 0;
+
+	mutex_lock(&msm_uport->mtx);
+	/*
+	 * If we're in the middle of a system suspend, don't process these
+	 * userspace/kernel API commands.
+	 */
+	if (msm_uport->pm_state == MSM_HS_PM_SYS_SUSPENDED) {
+		MSM_HS_WARN("%s:Can't process clk request during suspend",
+			__func__);
+		ret = -EIO;
+	}
+	mutex_unlock(&msm_uport->mtx);
+	if (ret)
+		goto exit_request_clock_on;
+
+	/* Use the local msm_uport (the original re-derived it from uport) */
+	msm_hs_resource_vote(msm_uport);
+	atomic_inc(&msm_uport->client_count);
+	client_count = atomic_read(&msm_uport->client_count);
+	LOG_USR_MSG(msm_uport->ipc_msm_hs_pwr_ctxt,
+			"%s: Client_Count %d\n", __func__,
+			client_count);
+
+	/* Clear the flag */
+	if (msm_uport->obs)
+		atomic_set(&msm_uport->client_req_state, 0);
+exit_request_clock_on:
+	return ret;
+}
+EXPORT_SYMBOL(msm_hs_request_clock_on);
+
+/*
+ * GPIO wakeup interrupt handler.  The first edge after arming is
+ * swallowed via wakeup.ignore; a genuine wakeup may optionally inject a
+ * pre-configured character into the tty so the peer's first byte is not
+ * lost while the port was clocked off.
+ */
+static irqreturn_t msm_hs_wakeup_isr(int irq, void *dev)
+{
+	unsigned int wakeup = 0;
+	unsigned long flags;
+	struct msm_hs_port *msm_uport = (struct msm_hs_port *)dev;
+	struct uart_port *uport = &msm_uport->uport;
+	struct tty_struct *tty = NULL;
+
+	spin_lock_irqsave(&uport->lock, flags);
+
+	if (msm_uport->wakeup.ignore)
+		msm_uport->wakeup.ignore = 0;
+	else
+		wakeup = 1;
+
+	if (wakeup) {
+		/*
+		 * Port was clocked off during rx, wake up and
+		 * optionally inject char into tty rx
+		 */
+		if (msm_uport->wakeup.inject_rx) {
+			/*
+			 * NOTE(review): assumes port.tty is valid while the
+			 * wakeup irq is armed (port open) — TODO confirm.
+			 */
+			tty = uport->state->port.tty;
+			tty_insert_flip_char(tty->port,
+					     msm_uport->wakeup.rx_to_inject,
+					     TTY_NORMAL);
+			hex_dump_ipc(msm_uport, msm_uport->rx.ipc_rx_ctxt,
+				"Rx Inject",
+				&msm_uport->wakeup.rx_to_inject, 0, 1);
+			MSM_HS_INFO("Wakeup ISR.Ignore%d\n",
+						msm_uport->wakeup.ignore);
+		}
+	}
+
+	spin_unlock_irqrestore(&uport->lock, flags);
+
+	/* Push outside the spinlock; tty is only set on the inject path */
+	if (wakeup && msm_uport->wakeup.inject_rx)
+		tty_flip_buffer_push(tty->port);
+	return IRQ_HANDLED;
+}
+
+/* Identification string reported to the serial core for this port type. */
+static const char *msm_hs_type(struct uart_port *port)
+{
+	return "MSM HS UART";
+}
+
+/**
+ * msm_hs_unconfig_uart_gpios: Unconfigures UART GPIOs
+ * @uport: uart port
+ *
+ * Parks the pins via pinctrl's suspend state when pinctrl is in use,
+ * otherwise releases any valid gpiolib lines from platform data.
+ */
+static void msm_hs_unconfig_uart_gpios(struct uart_port *uport)
+{
+	struct platform_device *pdev = to_platform_device(uport->dev);
+	const struct msm_serial_hs_platform_data *pdata =
+					pdev->dev.platform_data;
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+
+	if (msm_uport->use_pinctrl) {
+		/* Switch the pins to their low-power configuration */
+		if (pinctrl_select_state(msm_uport->pinctrl,
+				msm_uport->gpio_state_suspend))
+			MSM_HS_ERR("%s():Failed to pinctrl set_state",
+				__func__);
+		return;
+	}
+
+	if (!pdata) {
+		MSM_HS_ERR("Error:Pdata is NULL.\n");
+		return;
+	}
+
+	/* Legacy gpiolib path: free every line that was requested */
+	if (gpio_is_valid(pdata->uart_tx_gpio))
+		gpio_free(pdata->uart_tx_gpio);
+	if (gpio_is_valid(pdata->uart_rx_gpio))
+		gpio_free(pdata->uart_rx_gpio);
+	if (gpio_is_valid(pdata->uart_cts_gpio))
+		gpio_free(pdata->uart_cts_gpio);
+	if (gpio_is_valid(pdata->uart_rfr_gpio))
+		gpio_free(pdata->uart_rfr_gpio);
+}
+
+/**
+ * msm_hs_config_uart_gpios - Configures UART GPIOs
+ * @uport: uart port
+ *
+ * Prefers pinctrl (active state) when a pinctrl handle exists; otherwise
+ * falls back to requesting the four gpiolib lines from platform data.
+ * On a mid-sequence gpio_request() failure the already-acquired lines
+ * are released via the goto unwind chain below.  Returns 0 or -errno.
+ */
+static int msm_hs_config_uart_gpios(struct uart_port *uport)
+{
+	struct platform_device *pdev = to_platform_device(uport->dev);
+	const struct msm_serial_hs_platform_data *pdata =
+					pdev->dev.platform_data;
+	int ret = 0;
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+
+	if (!IS_ERR_OR_NULL(msm_uport->pinctrl)) {
+		MSM_HS_DBG("%s(): Using Pinctrl", __func__);
+		msm_uport->use_pinctrl = true;
+		ret = pinctrl_select_state(msm_uport->pinctrl,
+				msm_uport->gpio_state_active);
+		if (ret)
+			MSM_HS_ERR("%s(): Failed to pinctrl set_state",
+				__func__);
+		return ret;
+	} else if (pdata) {
+		/* Fall back to using gpio lib */
+		if (gpio_is_valid(pdata->uart_tx_gpio)) {
+			ret = gpio_request(pdata->uart_tx_gpio,
+							"UART_TX_GPIO");
+			if (unlikely(ret)) {
+				MSM_HS_ERR("gpio request failed for:%d\n",
+					pdata->uart_tx_gpio);
+				goto exit_uart_config;
+			}
+		}
+
+		if (gpio_is_valid(pdata->uart_rx_gpio)) {
+			ret = gpio_request(pdata->uart_rx_gpio,
+							"UART_RX_GPIO");
+			if (unlikely(ret)) {
+				MSM_HS_ERR("gpio request failed for:%d\n",
+					pdata->uart_rx_gpio);
+				goto uart_tx_unconfig;
+			}
+		}
+
+		if (gpio_is_valid(pdata->uart_cts_gpio)) {
+			ret = gpio_request(pdata->uart_cts_gpio,
+							"UART_CTS_GPIO");
+			if (unlikely(ret)) {
+				MSM_HS_ERR("gpio request failed for:%d\n",
+					pdata->uart_cts_gpio);
+				goto uart_rx_unconfig;
+			}
+		}
+
+		if (gpio_is_valid(pdata->uart_rfr_gpio)) {
+			ret = gpio_request(pdata->uart_rfr_gpio,
+							"UART_RFR_GPIO");
+			if (unlikely(ret)) {
+				MSM_HS_ERR("gpio request failed for:%d\n",
+					pdata->uart_rfr_gpio);
+				goto uart_cts_unconfig;
+			}
+		}
+	} else {
+		MSM_HS_ERR("Pdata is NULL.\n");
+		ret = -EINVAL;
+	}
+	return ret;
+
+	/* Unwind chain: release lines in reverse acquisition order.
+	 * Only reachable from the pdata branch, so pdata is non-NULL here.
+	 */
+uart_cts_unconfig:
+	if (gpio_is_valid(pdata->uart_cts_gpio))
+		gpio_free(pdata->uart_cts_gpio);
+uart_rx_unconfig:
+	if (gpio_is_valid(pdata->uart_rx_gpio))
+		gpio_free(pdata->uart_rx_gpio);
+uart_tx_unconfig:
+	if (gpio_is_valid(pdata->uart_tx_gpio))
+		gpio_free(pdata->uart_tx_gpio);
+exit_uart_config:
+	return ret;
+}
+
+
+/*
+ * Obtain the pinctrl handle plus "default" (active) and "sleep"
+ * (suspend) pin states.  On any lookup failure both the handle and the
+ * use_pinctrl flag are cleared, so later code falls back to the gpiolib
+ * path instead of calling pinctrl_select_state() with a NULL handle
+ * (the original left use_pinctrl set to true on the failure path).
+ */
+static void msm_hs_get_pinctrl_configs(struct uart_port *uport)
+{
+	struct pinctrl_state *set_state;
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+
+	msm_uport->pinctrl = devm_pinctrl_get(uport->dev);
+	if (IS_ERR_OR_NULL(msm_uport->pinctrl)) {
+		MSM_HS_DBG("%s(): Pinctrl not defined", __func__);
+	} else {
+		MSM_HS_DBG("%s(): Using Pinctrl", __func__);
+		msm_uport->use_pinctrl = true;
+
+		set_state = pinctrl_lookup_state(msm_uport->pinctrl,
+						PINCTRL_STATE_DEFAULT);
+		if (IS_ERR_OR_NULL(set_state)) {
+			dev_err(uport->dev,
+				"pinctrl lookup failed for default state");
+			goto pinctrl_fail;
+		}
+
+		MSM_HS_DBG("%s(): Pinctrl state active %p\n", __func__,
+			set_state);
+		msm_uport->gpio_state_active = set_state;
+
+		set_state = pinctrl_lookup_state(msm_uport->pinctrl,
+						PINCTRL_STATE_SLEEP);
+		if (IS_ERR_OR_NULL(set_state)) {
+			dev_err(uport->dev,
+				"pinctrl lookup failed for sleep state");
+			goto pinctrl_fail;
+		}
+
+		MSM_HS_DBG("%s(): Pinctrl state sleep %p\n", __func__,
+			set_state);
+		msm_uport->gpio_state_suspend = set_state;
+		return;
+	}
+pinctrl_fail:
+	/* Fall back to gpiolib: clear both the handle and the flag */
+	msm_uport->use_pinctrl = false;
+	msm_uport->pinctrl = NULL;
+}
+
+/* Called when port is opened.
+ *
+ * Maps the TX circular buffer for DMA, votes for clocks, arms the
+ * optional GPIO wakeup irq, (re)connects the TX/RX BAM pipes, programs
+ * the UARTDM core (BCR, auto-RFR level, stale detection), resets both
+ * data paths, requests the core irq and starts RX.  Returns 0 on
+ * success or a negative errno with all partial setup undone via the
+ * goto chain at the bottom.
+ */
+static int msm_hs_startup(struct uart_port *uport)
+{
+	int ret;
+	int rfr_level;
+	unsigned long flags;
+	unsigned int data;
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+	struct circ_buf *tx_buf = &uport->state->xmit;
+	struct msm_hs_tx *tx = &msm_uport->tx;
+	struct msm_hs_rx *rx = &msm_uport->rx;
+	struct sps_pipe *sps_pipe_handle_tx = tx->cons.pipe_handle;
+	struct sps_pipe *sps_pipe_handle_rx = rx->prod.pipe_handle;
+
+	/* Auto-RFR asserts 16 bytes below the FIFO size when possible */
+	rfr_level = uport->fifosize;
+	if (rfr_level > 16)
+		rfr_level -= 16;
+
+	/* NOTE(review): mapping result is not checked with
+	 * dma_mapping_error() — TODO confirm this is acceptable here.
+	 */
+	tx->dma_base = dma_map_single(uport->dev, tx_buf->buf, UART_XMIT_SIZE,
+				      DMA_TO_DEVICE);
+
+	/* turn on uart clk */
+	msm_hs_resource_vote(msm_uport);
+
+	if (is_use_low_power_wakeup(msm_uport)) {
+		ret = request_threaded_irq(msm_uport->wakeup.irq, NULL,
+					msm_hs_wakeup_isr,
+					IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+					"msm_hs_wakeup", msm_uport);
+		if (unlikely(ret)) {
+			MSM_HS_ERR("%s():Err getting uart wakeup_irq %d\n",
+				  __func__, ret);
+			goto unvote_exit;
+		}
+
+		/* Keep the wakeup irq disarmed until clocks go off */
+		msm_uport->wakeup.freed = false;
+		disable_irq(msm_uport->wakeup.irq);
+		msm_uport->wakeup.enabled = false;
+
+		ret = irq_set_irq_wake(msm_uport->wakeup.irq, 1);
+		if (unlikely(ret)) {
+			MSM_HS_ERR("%s():Err setting wakeup irq\n", __func__);
+			goto free_uart_irq;
+		}
+	}
+
+	ret = msm_hs_config_uart_gpios(uport);
+	if (ret) {
+		MSM_HS_ERR("Uart GPIO request failed\n");
+		goto free_uart_irq;
+	}
+
+	msm_hs_write(uport, UART_DM_DMEN, 0);
+
+	/* Connect TX */
+	sps_tx_disconnect(msm_uport);
+	ret = msm_hs_spsconnect_tx(msm_uport);
+	if (ret) {
+		MSM_HS_ERR("msm_serial_hs: SPS connect failed for TX");
+		goto unconfig_uart_gpios;
+	}
+
+	/* Connect RX */
+	kthread_flush_worker(&msm_uport->rx.kworker);
+	if (rx->flush != FLUSH_SHUTDOWN)
+		disconnect_rx_endpoint(msm_uport);
+	ret = msm_hs_spsconnect_rx(uport);
+	if (ret) {
+		MSM_HS_ERR("msm_serial_hs: SPS connect failed for RX");
+		goto sps_disconnect_tx;
+	}
+
+	data = (UARTDM_BCR_TX_BREAK_DISABLE | UARTDM_BCR_STALE_IRQ_EMPTY |
+		UARTDM_BCR_RX_DMRX_LOW_EN | UARTDM_BCR_RX_STAL_IRQ_DMRX_EQL |
+		UARTDM_BCR_RX_DMRX_1BYTE_RES_EN);
+	msm_hs_write(uport, UART_DM_BCR, data);
+
+	/* Set auto RFR Level */
+	data = msm_hs_read(uport, UART_DM_MR1);
+	data &= ~UARTDM_MR1_AUTO_RFR_LEVEL1_BMSK;
+	data &= ~UARTDM_MR1_AUTO_RFR_LEVEL0_BMSK;
+	data |= (UARTDM_MR1_AUTO_RFR_LEVEL1_BMSK & (rfr_level << 2));
+	data |= (UARTDM_MR1_AUTO_RFR_LEVEL0_BMSK & rfr_level);
+	msm_hs_write(uport, UART_DM_MR1, data);
+
+	/* Make sure RXSTALE count is non-zero */
+	data = msm_hs_read(uport, UART_DM_IPR);
+	if (!data) {
+		data |= 0x1f & UARTDM_IPR_STALE_LSB_BMSK;
+		msm_hs_write(uport, UART_DM_IPR, data);
+	}
+
+	/* Assume no flow control, unless termios sets it */
+	msm_uport->flow_control = false;
+	msm_hs_disable_flow_control(uport, true);
+
+
+	/* Reset TX */
+	msm_hs_write(uport, UART_DM_CR, RESET_TX);
+	msm_hs_write(uport, UART_DM_CR, RESET_RX);
+	msm_hs_write(uport, UART_DM_CR, RESET_ERROR_STATUS);
+	msm_hs_write(uport, UART_DM_CR, RESET_BREAK_INT);
+	msm_hs_write(uport, UART_DM_CR, RESET_STALE_INT);
+	msm_hs_write(uport, UART_DM_CR, RESET_CTS);
+	msm_hs_write(uport, UART_DM_CR, RFR_LOW);
+	/* Turn on Uart Receiver */
+	msm_hs_write(uport, UART_DM_CR, UARTDM_CR_RX_EN_BMSK);
+
+	/* Turn on Uart Transmitter */
+	msm_hs_write(uport, UART_DM_CR, UARTDM_CR_TX_EN_BMSK);
+
+	tx->dma_in_flight = false;
+	MSM_HS_DBG("%s():desc usage flag 0x%lx", __func__, rx->queued_flag);
+	setup_timer(&(tx->tx_timeout_timer),
+			tx_timeout_handler,
+			(unsigned long) msm_uport);
+
+	/* Enable reading the current CTS, no harm even if CTS is ignored */
+	msm_uport->imr_reg |= UARTDM_ISR_CURRENT_CTS_BMSK;
+
+	/* TXLEV on empty TX fifo */
+	msm_hs_write(uport, UART_DM_TFWR, 4);
+	/*
+	 * Complete all device write related configuration before
+	 * queuing RX request. Hence mb() requires here.
+	 */
+	mb();
+
+	ret = request_irq(uport->irq, msm_hs_isr, IRQF_TRIGGER_HIGH,
+			  "msm_hs_uart", msm_uport);
+	if (unlikely(ret)) {
+		MSM_HS_ERR("%s():Error %d getting uart irq\n", __func__, ret);
+		goto sps_disconnect_rx;
+	}
+
+
+	spin_lock_irqsave(&uport->lock, flags);
+	atomic_set(&msm_uport->client_count, 0);
+	atomic_set(&msm_uport->client_req_state, 0);
+	LOG_USR_MSG(msm_uport->ipc_msm_hs_pwr_ctxt,
+			"%s: Client_Count 0\n", __func__);
+	msm_hs_start_rx_locked(uport);
+
+	spin_unlock_irqrestore(&uport->lock, flags);
+
+	msm_hs_resource_unvote(msm_uport);
+	return 0;
+
+sps_disconnect_rx:
+	sps_disconnect(sps_pipe_handle_rx);
+sps_disconnect_tx:
+	sps_disconnect(sps_pipe_handle_tx);
+unconfig_uart_gpios:
+	msm_hs_unconfig_uart_gpios(uport);
+free_uart_irq:
+	free_irq(uport->irq, msm_uport);
+unvote_exit:
+	msm_hs_resource_unvote(msm_uport);
+	MSM_HS_ERR("%s(): Error return\n", __func__);
+	return ret;
+}
+
+/* Initialize tx and rx data structures.
+ *
+ * Creates the RX and TX kthread workers, allocates the coherent RX DMA
+ * buffer and programs the receive watermarks.  Returns 0 on success or
+ * a negative errno; on failure any kthread that was started is stopped.
+ * Fixes vs. the original: the second IS_ERR() check now tests tx->task
+ * (was a copy-paste of rx->task), error paths return a real errno
+ * instead of 0, and kthread_stop() is no longer called on ERR/NULL
+ * task pointers.
+ */
+static int uartdm_init_port(struct uart_port *uport)
+{
+	int ret = 0;
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+	struct msm_hs_tx *tx = &msm_uport->tx;
+	struct msm_hs_rx *rx = &msm_uport->rx;
+
+	init_waitqueue_head(&rx->wait);
+	init_waitqueue_head(&tx->wait);
+	init_waitqueue_head(&msm_uport->bam_disconnect_wait);
+
+	/* Init kernel threads for tx and rx */
+
+	kthread_init_worker(&rx->kworker);
+	rx->task = kthread_run(kthread_worker_fn,
+			&rx->kworker, "msm_serial_hs_%d_rx_work", uport->line);
+	if (IS_ERR(rx->task)) {
+		MSM_HS_ERR("%s(): error creating task", __func__);
+		ret = PTR_ERR(rx->task);
+		rx->task = NULL;
+		goto exit_lh_init;
+	}
+	kthread_init_work(&rx->kwork, msm_serial_hs_rx_work);
+
+	kthread_init_worker(&tx->kworker);
+	tx->task = kthread_run(kthread_worker_fn,
+			&tx->kworker, "msm_serial_hs_%d_tx_work", uport->line);
+	if (IS_ERR(tx->task)) {
+		MSM_HS_ERR("%s(): error creating task", __func__);
+		ret = PTR_ERR(tx->task);
+		tx->task = NULL;
+		goto exit_lh_init;
+	}
+
+	kthread_init_work(&tx->kwork, msm_serial_hs_tx_work);
+
+	rx->buffer = dma_alloc_coherent(uport->dev,
+				UART_DMA_DESC_NR * UARTDM_RX_BUF_SIZE,
+				 &rx->rbuffer, GFP_KERNEL);
+	if (!rx->buffer) {
+		MSM_HS_ERR("%s(): cannot allocate rx->buffer", __func__);
+		ret = -ENOMEM;
+		goto exit_lh_init;
+	}
+
+	/* Set up Uart Receive */
+	msm_hs_write(uport, UART_DM_RFWR, 32);
+	/* Write to BADR explicitly to set up FIFO sizes */
+	msm_hs_write(uport, UARTDM_BADR_ADDR, 64);
+
+	INIT_DELAYED_WORK(&rx->flip_insert_work, flip_insert_work);
+
+	return ret;
+exit_lh_init:
+	/* Stop only the threads that were successfully created */
+	if (rx->task) {
+		kthread_stop(rx->task);
+		rx->task = NULL;
+	}
+	if (tx->task) {
+		kthread_stop(tx->task);
+		tx->task = NULL;
+	}
+	return ret;
+}
+
+/*
+ * Build platform data from the device-tree node: the four UART gpios
+ * (optional — only logged when absent), the no-suspend-delay / OBS /
+ * inject-rx flags, and the mandatory BAM TX/RX pipe indices, which are
+ * range-checked against [BAM_PIPE_MIN, BAM_PIPE_MAX].  Returns the
+ * devm-allocated pdata or an ERR_PTR on a missing/invalid mandatory
+ * property.  Also resolves the "uart" alias into pdev->id.
+ */
+struct msm_serial_hs_platform_data
+	*msm_hs_dt_to_pdata(struct platform_device *pdev)
+{
+	struct device_node *node = pdev->dev.of_node;
+	struct msm_serial_hs_platform_data *pdata;
+	u32 rx_to_inject;
+	int ret;
+
+	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+	if (!pdata)
+		return ERR_PTR(-ENOMEM);
+
+	pdev->id = of_alias_get_id(pdev->dev.of_node, "uart");
+	/* UART TX GPIO */
+	pdata->uart_tx_gpio = of_get_named_gpio(node,
+					"qcom,tx-gpio", 0);
+	if (pdata->uart_tx_gpio < 0)
+		pr_err("uart_tx_gpio is not available\n");
+
+	/* UART RX GPIO */
+	pdata->uart_rx_gpio = of_get_named_gpio(node,
+					"qcom,rx-gpio", 0);
+	if (pdata->uart_rx_gpio < 0)
+		pr_err("uart_rx_gpio is not available\n");
+
+	/* UART CTS GPIO */
+	pdata->uart_cts_gpio = of_get_named_gpio(node,
+					"qcom,cts-gpio", 0);
+	if (pdata->uart_cts_gpio < 0)
+		pr_err("uart_cts_gpio is not available\n");
+
+	/* UART RFR GPIO */
+	pdata->uart_rfr_gpio = of_get_named_gpio(node,
+					"qcom,rfr-gpio", 0);
+	if (pdata->uart_rfr_gpio < 0)
+		pr_err("uart_rfr_gpio is not available\n");
+
+	pdata->no_suspend_delay = of_property_read_bool(node,
+				"qcom,no-suspend-delay");
+
+	pdata->obs = of_property_read_bool(node,
+				"qcom,msm-obs");
+	if (pdata->obs)
+		pr_err("%s:Out of Band sleep flag is set\n", __func__);
+
+	pdata->inject_rx_on_wakeup = of_property_read_bool(node,
+				"qcom,inject-rx-on-wakeup");
+
+	/* rx-char-to-inject is mandatory once inject-rx-on-wakeup is set */
+	if (pdata->inject_rx_on_wakeup) {
+		ret = of_property_read_u32(node, "qcom,rx-char-to-inject",
+						&rx_to_inject);
+		if (ret < 0) {
+			pr_err("Error: Rx_char_to_inject not specified.\n");
+			return ERR_PTR(ret);
+		}
+		pdata->rx_to_inject = (u8)rx_to_inject;
+	}
+
+	ret = of_property_read_u32(node, "qcom,bam-tx-ep-pipe-index",
+				&pdata->bam_tx_ep_pipe_index);
+	if (ret < 0) {
+		pr_err("Error: Getting UART BAM TX EP Pipe Index.\n");
+		return ERR_PTR(ret);
+	}
+
+	if (!(pdata->bam_tx_ep_pipe_index >= BAM_PIPE_MIN &&
+		pdata->bam_tx_ep_pipe_index <= BAM_PIPE_MAX)) {
+		pr_err("Error: Invalid UART BAM TX EP Pipe Index.\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	ret = of_property_read_u32(node, "qcom,bam-rx-ep-pipe-index",
+					&pdata->bam_rx_ep_pipe_index);
+	if (ret < 0) {
+		pr_err("Error: Getting UART BAM RX EP Pipe Index.\n");
+		return ERR_PTR(ret);
+	}
+
+	if (!(pdata->bam_rx_ep_pipe_index >= BAM_PIPE_MIN &&
+		pdata->bam_rx_ep_pipe_index <= BAM_PIPE_MAX)) {
+		pr_err("Error: Invalid UART BAM RX EP Pipe Index.\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	pr_debug("tx_ep_pipe_index:%d rx_ep_pipe_index:%d\n"
+		"tx_gpio:%d rx_gpio:%d rfr_gpio:%d cts_gpio:%d",
+		pdata->bam_tx_ep_pipe_index, pdata->bam_rx_ep_pipe_index,
+		pdata->uart_tx_gpio, pdata->uart_rx_gpio, pdata->uart_cts_gpio,
+		pdata->uart_rfr_gpio);
+
+	return pdata;
+}
+
+
+/**
+ * Deallocate UART peripheral's SPS endpoint
+ * @msm_uport - Pointer to msm_hs_port structure
+ * @ep - Pointer to sps endpoint data structure
+ *
+ * Frees the descriptor FIFO allocated in msm_hs_sps_init_ep_conn() and
+ * releases the SPS endpoint context.
+ */
+
+static void msm_hs_exit_ep_conn(struct msm_hs_port *msm_uport,
+				struct msm_hs_sps_ep_conn_data *ep)
+{
+	struct sps_pipe *sps_pipe_handle = ep->pipe_handle;
+	struct sps_connect *sps_config = &ep->config;
+
+	/*
+	 * Free with the CPU address and DMA handle that dma_alloc_coherent()
+	 * returned.  (The original passed &phys_base as the CPU address and
+	 * GFP_KERNEL as the DMA handle, which does not match the allocation
+	 * made in msm_hs_sps_init_ep_conn().)
+	 */
+	dma_free_coherent(msm_uport->uport.dev,
+			sps_config->desc.size,
+			sps_config->desc.base,
+			sps_config->desc.phys_base);
+	sps_free_endpoint(sps_pipe_handle);
+}
+
+
+/**
+ * Allocate UART peripheral's SPS endpoint
+ *
+ * This function allocates endpoint context
+ * by calling appropriate SPS driver APIs.
+ *
+ * @msm_uport - Pointer to msm_hs_port structure
+ * @ep - Pointer to sps endpoint data structure
+ * @is_produce - 1 means Producer endpoint
+ *             - 0 means Consumer endpoint
+ *
+ * @return - 0 if successful else negative value
+ */
+
+static int msm_hs_sps_init_ep_conn(struct msm_hs_port *msm_uport,
+				struct msm_hs_sps_ep_conn_data *ep,
+				bool is_producer)
+{
+	int rc = 0;
+	struct sps_pipe *sps_pipe_handle;
+	struct sps_connect *sps_config = &ep->config;
+	struct sps_register_event *sps_event = &ep->event;
+
+	/* Allocate endpoint context */
+	sps_pipe_handle = sps_alloc_endpoint();
+	if (!sps_pipe_handle) {
+		MSM_HS_ERR("%s(): sps_alloc_endpoint() failed!!\n"
+			"is_producer=%d", __func__, is_producer);
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	/* Get default connection configuration for an endpoint */
+	rc = sps_get_config(sps_pipe_handle, sps_config);
+	if (rc) {
+		MSM_HS_ERR("%s(): failed! pipe_handle=0x%p rc=%d",
+			__func__, sps_pipe_handle, rc);
+		goto get_config_err;
+	}
+
+	/* Modify the default connection configuration */
+	if (is_producer) {
+		/* For UART producer transfer, source is UART peripheral
+		 * where as destination is system memory
+		 */
+		sps_config->source = msm_uport->bam_handle;
+		sps_config->destination = SPS_DEV_HANDLE_MEM;
+		sps_config->mode = SPS_MODE_SRC;
+		sps_config->src_pipe_index = msm_uport->bam_rx_ep_pipe_index;
+		sps_config->dest_pipe_index = 0;
+		sps_event->callback = msm_hs_sps_rx_callback;
+	} else {
+		/* For UART consumer transfer, source is system memory
+		 * where as destination is UART peripheral
+		 */
+		sps_config->source = SPS_DEV_HANDLE_MEM;
+		sps_config->destination = msm_uport->bam_handle;
+		sps_config->mode = SPS_MODE_DEST;
+		sps_config->src_pipe_index = 0;
+		sps_config->dest_pipe_index = msm_uport->bam_tx_ep_pipe_index;
+		sps_event->callback = msm_hs_sps_tx_callback;
+	}
+
+	sps_config->options = SPS_O_EOT | SPS_O_DESC_DONE | SPS_O_AUTO_ENABLE;
+	sps_config->event_thresh = 0x10;
+
+	/* Allocate maximum descriptor fifo size */
+	sps_config->desc.size =
+		(1 + UART_DMA_DESC_NR) * sizeof(struct sps_iovec);
+	sps_config->desc.base = dma_alloc_coherent(msm_uport->uport.dev,
+						sps_config->desc.size,
+						&sps_config->desc.phys_base,
+						GFP_KERNEL);
+	if (!sps_config->desc.base) {
+		rc = -ENOMEM;
+		MSM_HS_ERR("msm_serial_hs: dma_alloc_coherent() failed!!\n");
+		goto get_config_err;
+	}
+	/* Descriptor FIFO must start out zeroed */
+	memset(sps_config->desc.base, 0x00, sps_config->desc.size);
+
+	sps_event->mode = SPS_TRIGGER_CALLBACK;
+
+	sps_event->options = SPS_O_DESC_DONE | SPS_O_EOT;
+	sps_event->user = (void *)msm_uport;
+
+	/* Now save the sps pipe handle */
+	ep->pipe_handle = sps_pipe_handle;
+	MSM_HS_DBG("msm_serial_hs: success !! %s: pipe_handle=0x%p\n"
+		"desc_fifo.phys_base=0x%pa\n",
+		is_producer ? "READ" : "WRITE",
+		sps_pipe_handle, &sps_config->desc.phys_base);
+	return 0;
+
+get_config_err:
+	sps_free_endpoint(sps_pipe_handle);
+out:
+	return rc;
+}
+
+/**
+ * Initialize SPS HW connected with UART core
+ *
+ * This function register BAM HW resources with
+ * SPS driver and then initialize 2 SPS endpoints
+ *
+ * msm_uport - Pointer to msm_hs_port structure
+ *
+ * @return - 0 if successful else negative value
+ */
+
+static int msm_hs_sps_init(struct msm_hs_port *msm_uport)
+{
+	int rc = 0;
+	struct sps_bam_props bam = {0};
+	unsigned long bam_handle;
+
+	/* Reuse an already-registered BAM handle when one exists */
+	rc = sps_phy2h(msm_uport->bam_mem, &bam_handle);
+	if (rc || !bam_handle) {
+		bam.phys_addr = msm_uport->bam_mem;
+		bam.virt_addr = msm_uport->bam_base;
+		/*
+		 * This event thresold value is only significant for BAM-to-BAM
+		 * transfer. It's ignored for BAM-to-System mode transfer.
+		 */
+		bam.event_threshold = 0x10;	/* Pipe event threshold */
+		bam.summing_threshold = 1;	/* BAM event threshold */
+
+		/* SPS driver wll handle the UART BAM IRQ */
+		bam.irq = (u32)msm_uport->bam_irq;
+		bam.manage = SPS_BAM_MGR_DEVICE_REMOTE;
+
+		MSM_HS_DBG("msm_serial_hs: bam physical base=0x%pa\n",
+							&bam.phys_addr);
+		MSM_HS_DBG("msm_serial_hs: bam virtual base=0x%p\n",
+							bam.virt_addr);
+
+		/* Register UART Peripheral BAM device to SPS driver */
+		rc = sps_register_bam_device(&bam, &bam_handle);
+		if (rc) {
+			MSM_HS_ERR("%s: BAM device register failed\n",
+				  __func__);
+			return rc;
+		}
+		/*
+		 * Log the freshly registered handle (the original printed
+		 * msm_uport->bam_handle, which is not assigned until below).
+		 */
+		MSM_HS_DBG("%s:BAM device registered. bam_handle=0x%lx",
+			   __func__, bam_handle);
+	}
+	msm_uport->bam_handle = bam_handle;
+
+	rc = msm_hs_sps_init_ep_conn(msm_uport, &msm_uport->rx.prod,
+				UART_SPS_PROD_PERIPHERAL);
+	if (rc) {
+		MSM_HS_ERR("%s: Failed to Init Producer BAM-pipe", __func__);
+		goto deregister_bam;
+	}
+
+	rc = msm_hs_sps_init_ep_conn(msm_uport, &msm_uport->tx.cons,
+				UART_SPS_CONS_PERIPHERAL);
+	if (rc) {
+		MSM_HS_ERR("%s: Failed to Init Consumer BAM-pipe", __func__);
+		goto deinit_ep_conn_prod;
+	}
+	return 0;
+
+deinit_ep_conn_prod:
+	msm_hs_exit_ep_conn(msm_uport, &msm_uport->rx.prod);
+deregister_bam:
+	sps_deregister_bam_device(msm_uport->bam_handle);
+	return rc;
+}
+
+
+/* Tracks which uart device ids (0..UARTDM_NR-1) are already claimed */
+static bool deviceid[UARTDM_NR] = {0};
+/*
+ * The mutex synchronizes grabbing next free device number
+ * both in case of an alias being used or not. When alias is
+ * used, the msm_hs_dt_to_pdata gets it and the boolean array
+ * is accordingly updated with device_id_set_used. If no alias
+ * is used, then device_id_grab_next_free sets that array.
+ */
+static DEFINE_MUTEX(mutex_next_device_id);
+
+/*
+ * Reserve the lowest unused device id, or -ENODEV when all UARTDM_NR
+ * slots are taken.  Serialized by mutex_next_device_id.
+ */
+static int device_id_grab_next_free(void)
+{
+	int i;
+	int ret = -ENODEV;
+
+	mutex_lock(&mutex_next_device_id);
+	for (i = 0; i < UARTDM_NR; i++) {
+		if (deviceid[i])
+			continue;
+		deviceid[i] = true;
+		ret = i;
+		break;
+	}
+	mutex_unlock(&mutex_next_device_id);
+	return ret;
+}
+
+/* Mark @index as used; returns -ENODEV if it was already claimed. */
+static int device_id_set_used(int index)
+{
+	int ret;
+
+	mutex_lock(&mutex_next_device_id);
+	ret = deviceid[index] ? -ENODEV : 0;
+	if (!ret)
+		deviceid[index] = true;
+	mutex_unlock(&mutex_next_device_id);
+	return ret;
+}
+
+/* For OBS targets only, gate the core UART irq on/off per @en. */
+static void obs_manage_irq(struct msm_hs_port *msm_uport, bool en)
+{
+	struct uart_port *uport = &(msm_uport->uport);
+
+	if (!msm_uport->obs)
+		return;
+
+	if (en)
+		enable_irq(uport->irq);
+	else
+		disable_irq(uport->irq);
+}
+
+/*
+ * Runtime-suspend the port: mark it suspended, quiesce the UART,
+ * mask the core irq on OBS targets, drop clock/bus votes, park the
+ * pins (OBS) and arm the GPIO wakeup irq unless a client explicitly
+ * requested clocks off.  All under the port mutex.
+ */
+static void msm_hs_pm_suspend(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct msm_hs_port *msm_uport = get_matching_hs_port(pdev);
+	int ret;
+	int client_count = 0;
+
+	if (!msm_uport)
+		goto err_suspend;
+	mutex_lock(&msm_uport->mtx);
+
+	client_count = atomic_read(&msm_uport->client_count);
+	msm_uport->pm_state = MSM_HS_PM_SUSPENDED;
+	msm_hs_resource_off(msm_uport);
+	obs_manage_irq(msm_uport, false);
+	msm_hs_clk_bus_unvote(msm_uport);
+
+	/* For OBS, don't use wakeup interrupt, set gpio to suspended state */
+	if (msm_uport->obs) {
+		ret = pinctrl_select_state(msm_uport->pinctrl,
+			msm_uport->gpio_state_suspend);
+		if (ret)
+			MSM_HS_ERR("%s():Error selecting pinctrl suspend state",
+				__func__);
+	}
+
+	if (!atomic_read(&msm_uport->client_req_state))
+		enable_wakeup_interrupt(msm_uport);
+	LOG_USR_MSG(msm_uport->ipc_msm_hs_pwr_ctxt,
+		"%s: PM State Suspended client_count %d\n", __func__,
+								client_count);
+	mutex_unlock(&msm_uport->mtx);
+	return;
+err_suspend:
+	pr_err("%s(): invalid uport", __func__);
+}
+
+/*
+ * Runtime-resume the port: disarm the GPIO wakeup irq (unless a client
+ * requested clocks off), restore the active pin state on OBS targets,
+ * re-vote clocks/bus, unmask the core irq and bring the data paths
+ * back up.  No-op if the port is already active.  Returns 0 or the
+ * clock-vote error.  All under the port mutex.
+ */
+static int msm_hs_pm_resume(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct msm_hs_port *msm_uport = get_matching_hs_port(pdev);
+	int ret = 0;
+	int client_count = 0;
+
+	if (!msm_uport) {
+		dev_err(dev, "%s:Invalid uport\n", __func__);
+		return -ENODEV;
+	}
+
+	mutex_lock(&msm_uport->mtx);
+	client_count = atomic_read(&msm_uport->client_count);
+	if (msm_uport->pm_state == MSM_HS_PM_ACTIVE)
+		goto exit_pm_resume;
+	if (!atomic_read(&msm_uport->client_req_state))
+		disable_wakeup_interrupt(msm_uport);
+
+	/* For OBS, don't use wakeup interrupt, set gpio to active state */
+	if (msm_uport->obs) {
+		ret = pinctrl_select_state(msm_uport->pinctrl,
+				msm_uport->gpio_state_active);
+		if (ret)
+			MSM_HS_ERR("%s():Error selecting active state",
+				 __func__);
+	}
+
+	ret = msm_hs_clk_bus_vote(msm_uport);
+	if (ret) {
+		/* Report on both the IPC log and the console */
+		MSM_HS_ERR("%s:Failed clock vote %d\n", __func__, ret);
+		dev_err(dev, "%s:Failed clock vote %d\n", __func__, ret);
+		goto exit_pm_resume;
+	}
+	obs_manage_irq(msm_uport, true);
+	msm_uport->pm_state = MSM_HS_PM_ACTIVE;
+	msm_hs_resource_on(msm_uport);
+
+	LOG_USR_MSG(msm_uport->ipc_msm_hs_pwr_ctxt,
+		"%s:PM State:Active client_count %d\n", __func__, client_count);
+exit_pm_resume:
+	mutex_unlock(&msm_uport->mtx);
+	return ret;
+}
+
+#ifdef CONFIG_PM
+/*
+ * System-suspend (noirq) callback: refuse with -EBUSY while the port is
+ * still runtime-active, otherwise mark it MSM_HS_PM_SYS_SUSPENDED so
+ * clock on/off requests are rejected until resume.  (The original kept
+ * an unused prev_pwr_state local and a stray ';' after the function.)
+ */
+static int msm_hs_pm_sys_suspend_noirq(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct msm_hs_port *msm_uport = get_matching_hs_port(pdev);
+	int clk_cnt, client_count, ret = 0;
+
+	if (IS_ERR_OR_NULL(msm_uport))
+		return -ENODEV;
+
+	mutex_lock(&msm_uport->mtx);
+
+	/*
+	 * If there is an active clk request or an impending userspace request
+	 * fail the suspend callback.
+	 */
+	clk_cnt = atomic_read(&msm_uport->resource_count);
+	client_count = atomic_read(&msm_uport->client_count);
+	if (msm_uport->pm_state == MSM_HS_PM_ACTIVE) {
+		MSM_HS_WARN("%s:Fail Suspend.clk_cnt:%d,clnt_count:%d\n",
+				 __func__, clk_cnt, client_count);
+		ret = -EBUSY;
+		goto exit_suspend_noirq;
+	}
+
+	msm_uport->pm_state = MSM_HS_PM_SYS_SUSPENDED;
+	LOG_USR_MSG(msm_uport->ipc_msm_hs_pwr_ctxt,
+		"%s:PM State:Sys-Suspended client_count %d\n", __func__,
+								client_count);
+exit_suspend_noirq:
+	mutex_unlock(&msm_uport->mtx);
+	return ret;
+}
+
+/*
+ * System-resume (noirq) callback: only downgrade the state from
+ * sys-suspended to runtime-suspended; resources are re-activated
+ * lazily when the next transfer is requested.
+ */
+static int msm_hs_pm_sys_resume_noirq(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct msm_hs_port *msm_uport = get_matching_hs_port(pdev);
+
+	if (IS_ERR_OR_NULL(msm_uport))
+		return -ENODEV;
+	/*
+	 * Note system-pm resume and update the state
+	 * variable. Resource activation will be done
+	 * when transfer is requested.
+	 */
+
+	mutex_lock(&msm_uport->mtx);
+	if (msm_uport->pm_state == MSM_HS_PM_SYS_SUSPENDED)
+		msm_uport->pm_state = MSM_HS_PM_SUSPENDED;
+	LOG_USR_MSG(msm_uport->ipc_msm_hs_pwr_ctxt,
+		"%s:PM State: Suspended\n", __func__);
+	mutex_unlock(&msm_uport->mtx);
+	return 0;
+}
+#endif
+
+#ifdef CONFIG_PM
+/*
+ * Enable runtime PM for the port: start in the suspended state with a
+ * 100 ms autosuspend delay, then hand control to the PM core.
+ */
+static void  msm_serial_hs_rt_init(struct uart_port *uport)
+{
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+
+	MSM_HS_INFO("%s(): Enabling runtime pm", __func__);
+	pm_runtime_set_suspended(uport->dev);
+	pm_runtime_set_autosuspend_delay(uport->dev, 100);
+	pm_runtime_use_autosuspend(uport->dev);
+	mutex_lock(&msm_uport->mtx);
+	msm_uport->pm_state = MSM_HS_PM_SUSPENDED;
+	mutex_unlock(&msm_uport->mtx);
+	pm_runtime_enable(uport->dev);
+}
+
+/* Runtime-PM suspend hook: msm_hs_pm_suspend() cannot fail, report 0. */
+static int msm_hs_runtime_suspend(struct device *dev)
+{
+	msm_hs_pm_suspend(dev);
+	return 0;
+}
+
+/* Runtime-PM resume hook: forwards msm_hs_pm_resume()'s result. */
+static int msm_hs_runtime_resume(struct device *dev)
+{
+	return msm_hs_pm_resume(dev);
+}
+#else
+/* !CONFIG_PM stubs: the int-returning ones must return a value
+ * (the originals fell off the end of a non-void function).
+ */
+static void  msm_serial_hs_rt_init(struct uart_port *uport) {}
+static int msm_hs_runtime_suspend(struct device *dev) { return 0; }
+static int msm_hs_runtime_resume(struct device *dev) { return 0; }
+#endif
+
+
+/*
+ * msm_hs_probe() - probe one MSM high-speed UART instance.
+ *
+ * Resolves the device id (from DT aliases when present), maps the UART
+ * core and BAM register windows, wires up IRQs, clocks and bus scaling,
+ * creates the IPC logging contexts, initialises the SPS/BAM pipes and
+ * finally registers the uart_port with the serial core.  On success the
+ * port is left with its clock/bus votes dropped and runtime PM enabled.
+ */
+static int msm_hs_probe(struct platform_device *pdev)
+{
+	int ret = 0;
+	struct uart_port *uport;
+	struct msm_hs_port *msm_uport;
+	struct resource *core_resource;
+	struct resource *bam_resource;
+	int core_irqres, bam_irqres, wakeup_irqres;
+	struct msm_serial_hs_platform_data *pdata = pdev->dev.platform_data;
+	unsigned long data;
+	char name[30];
+
+	if (pdev->dev.of_node) {
+		dev_dbg(&pdev->dev, "device tree enabled\n");
+		pdata = msm_hs_dt_to_pdata(pdev);
+		if (IS_ERR(pdata))
+			return PTR_ERR(pdata);
+
+		if (pdev->id < 0) {
+			pdev->id = device_id_grab_next_free();
+			if (pdev->id < 0) {
+				dev_err(&pdev->dev,
+					"Error grabbing next free device id");
+				return pdev->id;
+			}
+		} else {
+			ret = device_id_set_used(pdev->id);
+			if (ret < 0) {
+				dev_err(&pdev->dev, "%d alias taken",
+					pdev->id);
+				return ret;
+			}
+		}
+		pdev->dev.platform_data = pdata;
+	}
+
+	/*
+	 * Fix: pdata is dereferenced unconditionally below (wakeup_irq,
+	 * userid, ...).  When probed without DT and without board
+	 * platform data it is NULL, so fail the probe cleanly instead of
+	 * crashing.
+	 */
+	if (!pdata) {
+		dev_err(&pdev->dev, "No platform data supplied\n");
+		return -EINVAL;
+	}
+
+	if (pdev->id < 0 || pdev->id >= UARTDM_NR) {
+		dev_err(&pdev->dev, "Invalid platform device ID = %d\n",
+								pdev->id);
+		return -EINVAL;
+	}
+
+	msm_uport = devm_kzalloc(&pdev->dev, sizeof(struct msm_hs_port),
+			GFP_KERNEL);
+	if (!msm_uport)
+		return -ENOMEM;
+
+	msm_uport->uport.type = PORT_UNKNOWN;
+	uport = &msm_uport->uport;
+	uport->dev = &pdev->dev;
+
+	if (pdev->dev.of_node)
+		msm_uport->uart_type = BLSP_HSUART;
+
+	msm_hs_get_pinctrl_configs(uport);
+	/* Get required resources for BAM HSUART */
+	core_resource = platform_get_resource_byname(pdev,
+				IORESOURCE_MEM, "core_mem");
+	if (!core_resource) {
+		dev_err(&pdev->dev, "Invalid core HSUART Resources.\n");
+		return -ENXIO;
+	}
+	bam_resource = platform_get_resource_byname(pdev,
+				IORESOURCE_MEM, "bam_mem");
+	if (!bam_resource) {
+		dev_err(&pdev->dev, "Invalid BAM HSUART Resources.\n");
+		return -ENXIO;
+	}
+	core_irqres = platform_get_irq_byname(pdev, "core_irq");
+	if (core_irqres < 0) {
+		dev_err(&pdev->dev, "Error %d, invalid core irq resources.\n",
+			core_irqres);
+		return -ENXIO;
+	}
+	bam_irqres = platform_get_irq_byname(pdev, "bam_irq");
+	if (bam_irqres < 0) {
+		dev_err(&pdev->dev, "Error %d, invalid bam irq resources.\n",
+			bam_irqres);
+		return -ENXIO;
+	}
+	/* The wakeup irq is optional; -1 marks it absent. */
+	wakeup_irqres = platform_get_irq_byname(pdev, "wakeup_irq");
+	if (wakeup_irqres < 0) {
+		wakeup_irqres = -1;
+		pr_info("Wakeup irq not specified.\n");
+	}
+
+	uport->mapbase = core_resource->start;
+
+	uport->membase = ioremap(uport->mapbase,
+				resource_size(core_resource));
+	if (unlikely(!uport->membase)) {
+		dev_err(&pdev->dev, "UART Resource ioremap Failed.\n");
+		return -ENOMEM;
+	}
+	msm_uport->bam_mem = bam_resource->start;
+	msm_uport->bam_base = ioremap(msm_uport->bam_mem,
+				resource_size(bam_resource));
+	if (unlikely(!msm_uport->bam_base)) {
+		dev_err(&pdev->dev, "UART BAM Resource ioremap Failed.\n");
+		iounmap(uport->membase);
+		return -ENOMEM;
+	}
+
+	/* IPC logging for power-state transitions ("<dev>_state"). */
+	memset(name, 0, sizeof(name));
+	scnprintf(name, sizeof(name), "%s%s", dev_name(msm_uport->uport.dev),
+									"_state");
+	msm_uport->ipc_msm_hs_log_ctxt =
+			ipc_log_context_create(IPC_MSM_HS_LOG_STATE_PAGES,
+								name, 0);
+	if (!msm_uport->ipc_msm_hs_log_ctxt) {
+		dev_err(&pdev->dev, "%s: error creating logging context",
+								__func__);
+	} else {
+		msm_uport->ipc_debug_mask = INFO_LEV;
+		ret = sysfs_create_file(&pdev->dev.kobj,
+				&dev_attr_debug_mask.attr);
+		if (unlikely(ret))
+			MSM_HS_WARN("%s: Failed to create dev. attr", __func__);
+	}
+
+	uport->irq = core_irqres;
+	msm_uport->bam_irq = bam_irqres;
+	pdata->wakeup_irq = wakeup_irqres;
+
+	msm_uport->bus_scale_table = msm_bus_cl_get_pdata(pdev);
+	if (!msm_uport->bus_scale_table) {
+		MSM_HS_ERR("BLSP UART: Bus scaling is disabled.\n");
+	} else {
+		msm_uport->bus_perf_client =
+			msm_bus_scale_register_client
+				(msm_uport->bus_scale_table);
+		/*
+		 * Fix: the old check was IS_ERR(&member) -- the *address*
+		 * of the member, which is never an error pointer, so a
+		 * failed registration went undetected.
+		 * msm_bus_scale_register_client() returns a zero handle on
+		 * failure.
+		 */
+		if (!msm_uport->bus_perf_client) {
+			MSM_HS_ERR("%s():Bus client register failed\n",
+				   __func__);
+			ret = -EINVAL;
+			goto unmap_memory;
+		}
+	}
+
+	msm_uport->wakeup.irq = pdata->wakeup_irq;
+	msm_uport->wakeup.ignore = 1;
+	msm_uport->wakeup.inject_rx = pdata->inject_rx_on_wakeup;
+	msm_uport->wakeup.rx_to_inject = pdata->rx_to_inject;
+	msm_uport->obs = pdata->obs;
+
+	msm_uport->bam_tx_ep_pipe_index =
+			pdata->bam_tx_ep_pipe_index;
+	msm_uport->bam_rx_ep_pipe_index =
+			pdata->bam_rx_ep_pipe_index;
+	msm_uport->wakeup.enabled = true;
+
+	uport->iotype = UPIO_MEM;
+	uport->fifosize = 64;
+	uport->ops = &msm_hs_ops;
+	uport->flags = UPF_BOOT_AUTOCONF;
+	uport->uartclk = 7372800;
+	msm_uport->imr_reg = 0x0;
+
+	msm_uport->clk = clk_get(&pdev->dev, "core_clk");
+	if (IS_ERR(msm_uport->clk)) {
+		ret = PTR_ERR(msm_uport->clk);
+		goto deregister_bus_client;
+	}
+
+	msm_uport->pclk = clk_get(&pdev->dev, "iface_clk");
+	/*
+	 * Some configurations do not require explicit pclk control so
+	 * do not flag error on pclk get failure.
+	 */
+	if (IS_ERR(msm_uport->pclk))
+		msm_uport->pclk = NULL;
+
+	msm_uport->hsuart_wq = alloc_workqueue("k_hsuart",
+					WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
+	if (!msm_uport->hsuart_wq) {
+		MSM_HS_ERR("%s(): Unable to create workqueue hsuart_wq\n",
+								__func__);
+		ret =  -ENOMEM;
+		goto put_clk;
+	}
+
+	mutex_init(&msm_uport->mtx);
+
+	/* Initialize SPS HW connected with UART core */
+	ret = msm_hs_sps_init(msm_uport);
+	if (unlikely(ret)) {
+		MSM_HS_ERR("SPS Initialization failed ! err=%d", ret);
+		goto destroy_mutex;
+	}
+
+	msm_uport->tx.flush = FLUSH_SHUTDOWN;
+	msm_uport->rx.flush = FLUSH_SHUTDOWN;
+
+	/* Per-direction IPC logging contexts ("<dev>_tx"/"_rx"/"_pwr"). */
+	memset(name, 0, sizeof(name));
+	scnprintf(name, sizeof(name), "%s%s", dev_name(msm_uport->uport.dev),
+									"_tx");
+	msm_uport->tx.ipc_tx_ctxt =
+		ipc_log_context_create(IPC_MSM_HS_LOG_DATA_PAGES, name, 0);
+	if (!msm_uport->tx.ipc_tx_ctxt)
+		dev_err(&pdev->dev, "%s: error creating tx logging context",
+								__func__);
+
+	memset(name, 0, sizeof(name));
+	scnprintf(name, sizeof(name), "%s%s", dev_name(msm_uport->uport.dev),
+									"_rx");
+	msm_uport->rx.ipc_rx_ctxt = ipc_log_context_create(
+					IPC_MSM_HS_LOG_DATA_PAGES, name, 0);
+	if (!msm_uport->rx.ipc_rx_ctxt)
+		dev_err(&pdev->dev, "%s: error creating rx logging context",
+								__func__);
+
+	memset(name, 0, sizeof(name));
+	scnprintf(name, sizeof(name), "%s%s", dev_name(msm_uport->uport.dev),
+									"_pwr");
+	msm_uport->ipc_msm_hs_pwr_ctxt = ipc_log_context_create(
+					IPC_MSM_HS_LOG_USER_PAGES, name, 0);
+	if (!msm_uport->ipc_msm_hs_pwr_ctxt)
+		dev_err(&pdev->dev, "%s: error creating usr logging context",
+								__func__);
+
+	/* (duplicate uport->irq / bam_irq assignments removed; they were
+	 * already set right after the resource lookups above) */
+
+	clk_set_rate(msm_uport->clk, msm_uport->uport.uartclk);
+	msm_hs_clk_bus_vote(msm_uport);
+	ret = uartdm_init_port(uport);
+	if (unlikely(ret))
+		goto err_clock;
+
+	/* configure the CR Protection to Enable */
+	msm_hs_write(uport, UART_DM_CR, CR_PROTECTION_EN);
+
+	/*
+	 * Enable Command register protection before going ahead as this hw
+	 * configuration makes sure that issued cmd to CR register gets complete
+	 * before next issued cmd start. Hence mb() requires here.
+	 */
+	mb();
+
+	/*
+	 * Set RX_BREAK_ZERO_CHAR_OFF and RX_ERROR_CHAR_OFF
+	 * so any rx_break and character having parity or framing
+	 * error don't enter inside UART RX FIFO.
+	 */
+	data = msm_hs_read(uport, UART_DM_MR2);
+	data |= (UARTDM_MR2_RX_BREAK_ZERO_CHAR_OFF |
+			UARTDM_MR2_RX_ERROR_CHAR_OFF);
+	msm_hs_write(uport, UART_DM_MR2, data);
+	/* Ensure register IO completion */
+	mb();
+
+	ret = sysfs_create_file(&pdev->dev.kobj, &dev_attr_clock.attr);
+	if (unlikely(ret)) {
+		MSM_HS_ERR("Probe Failed as sysfs failed\n");
+		goto err_clock;
+	}
+
+	msm_serial_debugfs_init(msm_uport, pdev->id);
+	msm_hs_unconfig_uart_gpios(uport);
+
+	uport->line = pdev->id;
+	/*
+	 * Fix: valid line indices are 0..UARTDM_NR-1; the old "<=" allowed
+	 * an index one past the end, which uart_add_one_port() rejects.
+	 */
+	if (pdata->userid && pdata->userid < UARTDM_NR)
+		uport->line = pdata->userid;
+	ret = uart_add_one_port(&msm_hs_driver, uport);
+	if (!ret) {
+		msm_hs_clk_bus_unvote(msm_uport);
+		msm_serial_hs_rt_init(uport);
+		return ret;
+	}
+
+err_clock:
+	msm_hs_clk_bus_unvote(msm_uport);
+	/* NOTE(review): sysfs attributes, debugfs entries and IPC log
+	 * contexts created above are not torn down on failure paths --
+	 * confirm whether that leak is acceptable for a failed probe. */
+
+destroy_mutex:
+	mutex_destroy(&msm_uport->mtx);
+	destroy_workqueue(msm_uport->hsuart_wq);
+
+put_clk:
+	if (msm_uport->pclk)
+		clk_put(msm_uport->pclk);
+
+	if (msm_uport->clk)
+		clk_put(msm_uport->clk);
+
+deregister_bus_client:
+	msm_bus_scale_unregister_client(msm_uport->bus_perf_client);
+unmap_memory:
+	iounmap(uport->membase);
+	iounmap(msm_uport->bam_base);
+
+	return ret;
+}
+
+/*
+ * Module init: register the UART driver core first, create the
+ * (best-effort) debugfs directory, then register the platform driver.
+ * Both earlier steps are unwound if platform_driver_register() fails.
+ */
+static int __init msm_serial_hs_init(void)
+{
+	int ret;
+
+	ret = uart_register_driver(&msm_hs_driver);
+	if (unlikely(ret)) {
+		pr_err("%s failed to load\n", __func__);
+		return ret;
+	}
+	/* debugfs is optional: a failure here only logs, probe continues. */
+	debug_base = debugfs_create_dir("msm_serial_hs", NULL);
+	if (IS_ERR_OR_NULL(debug_base))
+		pr_err("msm_serial_hs: Cannot create debugfs dir\n");
+
+	ret = platform_driver_register(&msm_serial_hs_platform_driver);
+	if (ret) {
+		pr_err("%s failed to load\n", __func__);
+		debugfs_remove_recursive(debug_base);
+		uart_unregister_driver(&msm_hs_driver);
+		return ret;
+	}
+
+	pr_info("msm_serial_hs module loaded\n");
+	return ret;
+}
+
+/*
+ * Called by the serial core when the port is closed:
+ *     - disables wakeup/core interrupts and frees the IRQs
+ *     - drains the TX/RX kthread workers and disconnects the TX BAM pipe
+ *     - disables the transmitter/receiver and masks all UART interrupts
+ *     - rebalances any leftover clock/resource votes so the port powers
+ *       down cleanly
+ */
+static void msm_hs_shutdown(struct uart_port *uport)
+{
+	int ret, rc;
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+	struct circ_buf *tx_buf = &uport->state->xmit;
+	int data;
+	unsigned long flags;
+
+	/* Stop the wakeup interrupt from acting as a system wake source. */
+	if (is_use_low_power_wakeup(msm_uport))
+		irq_set_irq_wake(msm_uport->wakeup.irq, 0);
+
+	if (msm_uport->wakeup.enabled)
+		disable_irq(msm_uport->wakeup.irq);
+	else
+		disable_irq(uport->irq);
+
+	spin_lock_irqsave(&uport->lock, flags);
+	msm_uport->wakeup.enabled = false;
+	msm_uport->wakeup.ignore = 1;
+	spin_unlock_irqrestore(&uport->lock, flags);
+
+	/* Free the interrupt */
+	free_irq(uport->irq, msm_uport);
+	if (is_use_low_power_wakeup(msm_uport)) {
+		free_irq(msm_uport->wakeup.irq, msm_uport);
+		MSM_HS_DBG("%s(): wakeup irq freed", __func__);
+	}
+	msm_uport->wakeup.freed = true;
+
+	/* make sure tx lh finishes */
+	kthread_flush_worker(&msm_uport->tx.kworker);
+	/* NOTE(review): the 500 timeout is in jiffies, not milliseconds --
+	 * confirm the intended duration; msecs_to_jiffies() would be
+	 * clearer. */
+	ret = wait_event_timeout(msm_uport->tx.wait,
+			uart_circ_empty(tx_buf), 500);
+	if (!ret)
+		MSM_HS_WARN("Shutdown called when tx buff not empty");
+
+	msm_hs_resource_vote(msm_uport);
+	/* Stop remote side from sending data */
+	msm_hs_disable_flow_control(uport, false);
+	/* make sure rx lh finishes */
+	kthread_flush_worker(&msm_uport->rx.kworker);
+
+	if (msm_uport->rx.flush != FLUSH_SHUTDOWN) {
+		/* disable and disconnect rx */
+		ret = wait_event_timeout(msm_uport->rx.wait,
+				!msm_uport->rx.pending_flag, 500);
+		if (!ret)
+			MSM_HS_WARN("%s(): rx disconnect not complete",
+				__func__);
+		msm_hs_disconnect_rx(uport);
+	}
+
+	cancel_delayed_work_sync(&msm_uport->rx.flip_insert_work);
+	flush_workqueue(msm_uport->hsuart_wq);
+
+	/* BAM Disconnect for TX */
+	data = msm_hs_read(uport, UART_DM_DMEN);
+	data &= ~UARTDM_TX_BAM_ENABLE_BMSK;
+	msm_hs_write(uport, UART_DM_DMEN, data);
+	ret = sps_tx_disconnect(msm_uport);
+	if (ret)
+		MSM_HS_ERR("%s(): sps_disconnect failed\n",
+					__func__);
+	msm_uport->tx.flush = FLUSH_SHUTDOWN;
+	/* Disable the transmitter */
+	msm_hs_write(uport, UART_DM_CR, UARTDM_CR_TX_DISABLE_BMSK);
+	/* Disable the receiver */
+	msm_hs_write(uport, UART_DM_CR, UARTDM_CR_RX_DISABLE_BMSK);
+
+	msm_uport->imr_reg = 0;
+	msm_hs_write(uport, UART_DM_IMR, msm_uport->imr_reg);
+	/*
+	 * Complete all device write before actually disabling uartclk.
+	 * Hence mb() requires here.
+	 */
+	mb();
+
+	msm_uport->rx.buffer_pending = NONE_PENDING;
+	MSM_HS_DBG("%s(): tx, rx events complete", __func__);
+
+	dma_unmap_single(uport->dev, msm_uport->tx.dma_base,
+			 UART_XMIT_SIZE, DMA_TO_DEVICE);
+
+	msm_hs_resource_unvote(msm_uport);
+	/* Rebalance any leaked votes so the final state is fully unvoted. */
+	rc = atomic_read(&msm_uport->resource_count);
+	if (rc) {
+		atomic_set(&msm_uport->resource_count, 1);
+		MSM_HS_WARN("%s(): removing extra vote\n", __func__);
+		msm_hs_resource_unvote(msm_uport);
+	}
+	if (atomic_read(&msm_uport->client_req_state)) {
+		MSM_HS_WARN("%s: Client clock vote imbalance\n", __func__);
+		atomic_set(&msm_uport->client_req_state, 0);
+	}
+	if (atomic_read(&msm_uport->client_count)) {
+		MSM_HS_WARN("%s: Client vote on, forcing to 0\n", __func__);
+		atomic_set(&msm_uport->client_count, 0);
+		LOG_USR_MSG(msm_uport->ipc_msm_hs_pwr_ctxt,
+			"%s: Client_Count 0\n", __func__);
+	}
+	msm_hs_unconfig_uart_gpios(uport);
+	MSM_HS_INFO("%s:UART port closed successfully\n", __func__);
+}
+
+/* Module exit: unregister in the reverse order of msm_serial_hs_init(). */
+static void __exit msm_serial_hs_exit(void)
+{
+	pr_info("msm_serial_hs module removed\n");
+	debugfs_remove_recursive(debug_base);
+	platform_driver_unregister(&msm_serial_hs_platform_driver);
+	uart_unregister_driver(&msm_hs_driver);
+}
+
+/*
+ * Device PM callbacks: runtime hooks come from the CONFIG_PM section
+ * above (stubs otherwise); the noirq system-sleep hooks are defined in
+ * the preceding #ifdef block.  NOTE(review): if that block is compiled
+ * out, this initializer will not build -- confirm stubs exist for the
+ * suspend/resume_noirq handlers in that configuration.
+ */
+static const struct dev_pm_ops msm_hs_dev_pm_ops = {
+	.runtime_suspend = msm_hs_runtime_suspend,
+	.runtime_resume = msm_hs_runtime_resume,
+	.runtime_idle = NULL,
+	.suspend_noirq = msm_hs_pm_sys_suspend_noirq,
+	.resume_noirq = msm_hs_pm_sys_resume_noirq,
+};
+
+/* Platform driver glue: probe/remove plus PM ops and DT matching. */
+static struct platform_driver msm_serial_hs_platform_driver = {
+	.probe	= msm_hs_probe,
+	.remove = msm_hs_remove,
+	.driver = {
+		.name = "msm_serial_hs",
+		.pm   = &msm_hs_dev_pm_ops,
+		.of_match_table = msm_hs_match_table,
+	},
+};
+
+/* Serial-core driver descriptor: up to UARTDM_NR ports named ttyHS<n>. */
+static struct uart_driver msm_hs_driver = {
+	.owner = THIS_MODULE,
+	.driver_name = "msm_serial_hs",
+	.dev_name = "ttyHS",
+	.nr = UARTDM_NR,
+	/* No console on high-speed ports; use NULL, not integer 0, for a
+	 * pointer member (checkpatch/kernel style). */
+	.cons = NULL,
+};
+
+/* serial-core operations for the high-speed UART port.  flush_buffer
+ * is intentionally left NULL. */
+static const struct uart_ops msm_hs_ops = {
+	.tx_empty = msm_hs_tx_empty,
+	.set_mctrl = msm_hs_set_mctrl_locked,
+	.get_mctrl = msm_hs_get_mctrl_locked,
+	.stop_tx = msm_hs_stop_tx_locked,
+	.start_tx = msm_hs_start_tx_locked,
+	.stop_rx = msm_hs_stop_rx_locked,
+	.enable_ms = msm_hs_enable_ms_locked,
+	.break_ctl = msm_hs_break_ctl,
+	.startup = msm_hs_startup,
+	.shutdown = msm_hs_shutdown,
+	.set_termios = msm_hs_set_termios,
+	.type = msm_hs_type,
+	.config_port = msm_hs_config_port,
+	.flush_buffer = NULL,
+	.ioctl = msm_hs_ioctl,
+};
+
+module_init(msm_serial_hs_init);
+module_exit(msm_serial_hs_exit);
+MODULE_DESCRIPTION("High Speed UART Driver for the MSM chipset");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/tty/serial/msm_serial_hs_hwreg.h b/drivers/tty/serial/msm_serial_hs_hwreg.h
new file mode 100644
index 0000000..d5ce41f
--- /dev/null
+++ b/drivers/tty/serial/msm_serial_hs_hwreg.h
@@ -0,0 +1,283 @@
+/* drivers/serial/msm_serial_hs_hwreg.h
+ *
+ * Copyright (c) 2007-2009, 2012-2014,The Linux Foundation. All rights reserved.
+ *
+ * All source code in this file is licensed under the following license
+ * except where indicated.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can find it at http://www.fsf.org
+ */
+
+#ifndef MSM_SERIAL_HS_HWREG_H
+#define MSM_SERIAL_HS_HWREG_H
+
+#define GSBI_CONTROL_ADDR              0x0
+#define GSBI_PROTOCOL_CODE_MASK        0x30
+#define GSBI_PROTOCOL_I2C_UART         0x60
+#define GSBI_PROTOCOL_UART             0x40
+#define GSBI_PROTOCOL_IDLE             0x0
+
+#define TCSR_ADM_1_A_CRCI_MUX_SEL      0x78
+#define TCSR_ADM_1_B_CRCI_MUX_SEL      0x7C
+#define ADM1_CRCI_GSBI6_RX_SEL         0x800
+#define ADM1_CRCI_GSBI6_TX_SEL         0x400
+
+#define MSM_ENABLE_UART_CLOCK TIOCPMGET
+#define MSM_DISABLE_UART_CLOCK TIOCPMPUT
+#define MSM_GET_UART_CLOCK_STATUS TIOCPMACT
+
+/* Symbolic register identifiers for the legacy (hsl) code path.  No
+ * explicit values are assigned, so these are sequential indices
+ * (0..UARTDM_LAST), not hardware byte offsets. */
+enum msm_hsl_regs {
+	UARTDM_MR1,
+	UARTDM_MR2,
+	UARTDM_IMR,
+	UARTDM_SR,
+	UARTDM_CR,
+	UARTDM_CSR,
+	UARTDM_IPR,
+	UARTDM_ISR,
+	UARTDM_RX_TOTAL_SNAP,
+	UARTDM_RFWR,
+	UARTDM_TFWR,
+	UARTDM_RF,
+	UARTDM_TF,
+	UARTDM_MISR,
+	UARTDM_DMRX,
+	UARTDM_NCF_TX,
+	UARTDM_DMEN,
+	UARTDM_BCR,
+	UARTDM_TXFS,
+	UARTDM_RXFS,
+	UARTDM_LAST,
+};
+
+/* UARTDM register byte offsets, used directly by msm_hs_read() /
+ * msm_hs_write() against the port's mapped register base (v1.4 layout;
+ * presumably -- TODO confirm against the UARTDM core version in use). */
+enum msm_hs_regs {
+	UART_DM_MR1 = 0x0,
+	UART_DM_MR2 = 0x4,
+	UART_DM_IMR = 0xb0,
+	UART_DM_SR = 0xa4,
+	UART_DM_CR = 0xa8,
+	UART_DM_CSR = 0xa0,
+	UART_DM_IPR = 0x18,
+	UART_DM_ISR = 0xb4,
+	UART_DM_RX_TOTAL_SNAP = 0xbc,
+	UART_DM_TFWR = 0x1c,
+	UART_DM_RFWR = 0x20,
+	UART_DM_RF = 0x140,
+	UART_DM_TF = 0x100,
+	UART_DM_MISR = 0xac,
+	UART_DM_DMRX = 0x34,
+	UART_DM_NCF_TX = 0x40,
+	UART_DM_DMEN = 0x3c,
+	UART_DM_TXFS = 0x4c,
+	UART_DM_RXFS = 0x50,
+	UART_DM_RX_TRANS_CTRL = 0xcc,
+	UART_DM_BCR = 0xc8,
+};
+
+#define UARTDM_MR1_ADDR 0x0
+#define UARTDM_MR2_ADDR 0x4
+
+/* Backward Compatibility Register for UARTDM Core v1.4 */
+#define UARTDM_BCR_ADDR	0xc8
+
+/*
+ * UARTDM Core v1.4 STALE_IRQ_EMPTY bit definition
+ * Stale interrupt will fire if bit is set when RX-FIFO is empty
+ */
+#define UARTDM_BCR_TX_BREAK_DISABLE	0x1
+#define UARTDM_BCR_STALE_IRQ_EMPTY	0x2
+#define UARTDM_BCR_RX_DMRX_LOW_EN	0x4
+#define UARTDM_BCR_RX_STAL_IRQ_DMRX_EQL	0x10
+#define UARTDM_BCR_RX_DMRX_1BYTE_RES_EN	0x20
+
+/* TRANSFER_CONTROL Register for UARTDM Core v1.4 */
+#define UARTDM_RX_TRANS_CTRL_ADDR      0xcc
+
+/* TRANSFER_CONTROL Register bits */
+#define RX_STALE_AUTO_RE_EN		0x1
+#define RX_TRANS_AUTO_RE_ACTIVATE	0x2
+#define RX_DMRX_CYCLIC_EN		0x4
+
+/* write only register */
+#define UARTDM_CSR_115200 0xFF
+#define UARTDM_CSR_57600  0xEE
+#define UARTDM_CSR_38400  0xDD
+#define UARTDM_CSR_28800  0xCC
+#define UARTDM_CSR_19200  0xBB
+#define UARTDM_CSR_14400  0xAA
+#define UARTDM_CSR_9600   0x99
+#define UARTDM_CSR_7200   0x88
+#define UARTDM_CSR_4800   0x77
+#define UARTDM_CSR_3600   0x66
+#define UARTDM_CSR_2400   0x55
+#define UARTDM_CSR_1200   0x44
+#define UARTDM_CSR_600    0x33
+#define UARTDM_CSR_300    0x22
+#define UARTDM_CSR_150    0x11
+#define UARTDM_CSR_75     0x00
+
+/* write only register */
+#define UARTDM_IPR_ADDR 0x18
+#define UARTDM_TFWR_ADDR 0x1c
+#define UARTDM_RFWR_ADDR 0x20
+#define UARTDM_HCR_ADDR 0x24
+#define UARTDM_DMRX_ADDR 0x34
+#define UARTDM_DMEN_ADDR 0x3c
+
+/* UART_DM_NO_CHARS_FOR_TX */
+#define UARTDM_NCF_TX_ADDR 0x40
+
+#define UARTDM_BADR_ADDR 0x44
+
+#define UARTDM_SIM_CFG_ADDR 0x80
+
+/* Read Only register */
+#define UARTDM_TXFS_ADDR 0x4C
+#define UARTDM_RXFS_ADDR 0x50
+
+/* Register field Mask Mapping */
+#define UARTDM_SR_RX_BREAK_BMSK	        BIT(6)
+#define UARTDM_SR_PAR_FRAME_BMSK	BIT(5)
+#define UARTDM_SR_OVERRUN_BMSK		BIT(4)
+#define UARTDM_SR_TXEMT_BMSK		BIT(3)
+#define UARTDM_SR_TXRDY_BMSK		BIT(2)
+#define UARTDM_SR_RXRDY_BMSK		BIT(0)
+
+#define UARTDM_CR_TX_DISABLE_BMSK	BIT(3)
+#define UARTDM_CR_RX_DISABLE_BMSK	BIT(1)
+#define UARTDM_CR_TX_EN_BMSK		BIT(2)
+#define UARTDM_CR_RX_EN_BMSK		BIT(0)
+
+/* UARTDM_CR channel_command bit value (register field is bits 8:4) */
+#define RESET_RX		0x10
+#define RESET_TX		0x20
+#define RESET_ERROR_STATUS	0x30
+#define RESET_BREAK_INT		0x40
+#define START_BREAK		0x50
+#define STOP_BREAK		0x60
+#define RESET_CTS		0x70
+#define RESET_STALE_INT		0x80
+#define RFR_LOW			0xD0
+#define RFR_HIGH		0xE0
+#define CR_PROTECTION_EN	0x100
+#define STALE_EVENT_ENABLE	0x500
+#define STALE_EVENT_DISABLE	0x600
+#define FORCE_STALE_EVENT	0x400
+#define CLEAR_TX_READY		0x300
+#define RESET_TX_ERROR		0x800
+#define RESET_TX_DONE		0x810
+
+/*
+ * UARTDM_CR BAM IFC comman bit value
+ * for UARTDM Core v1.4
+ */
+#define START_RX_BAM_IFC	0x850
+#define START_TX_BAM_IFC	0x860
+
+#define UARTDM_MR1_AUTO_RFR_LEVEL1_BMSK 0xffffff00
+#define UARTDM_MR1_AUTO_RFR_LEVEL0_BMSK 0x3f
+#define UARTDM_MR1_CTS_CTL_BMSK 0x40
+#define UARTDM_MR1_RX_RDY_CTL_BMSK 0x80
+
+/*
+ * UARTDM Core v1.4 MR2_RFR_CTS_LOOP bitmask
+ * Enables internal loopback between RFR_N of
+ * RX channel and CTS_N of TX channel.
+ */
+#define UARTDM_MR2_RFR_CTS_LOOP_MODE_BMSK	0x400
+
+#define UARTDM_MR2_LOOP_MODE_BMSK		0x80
+#define UARTDM_MR2_ERROR_MODE_BMSK		0x40
+#define UARTDM_MR2_BITS_PER_CHAR_BMSK		0x30
+#define UARTDM_MR2_RX_ZERO_CHAR_OFF		0x100
+#define UARTDM_MR2_RX_ERROR_CHAR_OFF		0x200
+#define UARTDM_MR2_RX_BREAK_ZERO_CHAR_OFF	0x100
+
+#define UARTDM_MR2_BITS_PER_CHAR_8	(0x3 << 4)
+
+/* bits per character configuration */
+#define FIVE_BPC  (0 << 4)
+#define SIX_BPC   (1 << 4)
+#define SEVEN_BPC (2 << 4)
+#define EIGHT_BPC (3 << 4)
+
+#define UARTDM_MR2_STOP_BIT_LEN_BMSK 0xc
+#define STOP_BIT_ONE (1 << 2)
+#define STOP_BIT_TWO (3 << 2)
+
+#define UARTDM_MR2_PARITY_MODE_BMSK 0x3
+
+/* Parity configuration */
+#define NO_PARITY 0x0
+#define EVEN_PARITY 0x2
+#define ODD_PARITY 0x1
+#define SPACE_PARITY 0x3
+
+#define UARTDM_IPR_STALE_TIMEOUT_MSB_BMSK 0xffffff80
+#define UARTDM_IPR_STALE_LSB_BMSK 0x1f
+
+/* These can be used for both ISR and IMR register */
+#define UARTDM_ISR_TX_READY_BMSK	BIT(7)
+#define UARTDM_ISR_CURRENT_CTS_BMSK	BIT(6)
+#define UARTDM_ISR_DELTA_CTS_BMSK	BIT(5)
+#define UARTDM_ISR_RXLEV_BMSK		BIT(4)
+#define UARTDM_ISR_RXSTALE_BMSK		BIT(3)
+#define UARTDM_ISR_RXBREAK_BMSK		BIT(2)
+#define UARTDM_ISR_RXHUNT_BMSK		BIT(1)
+#define UARTDM_ISR_TXLEV_BMSK		BIT(0)
+
+/* Field definitions for UART_DM_DMEN*/
+#define UARTDM_TX_DM_EN_BMSK 0x1
+#define UARTDM_RX_DM_EN_BMSK 0x2
+
+/*
+ * UARTDM Core v1.4 bitmask
+ * Bitmasks for enabling Rx and Tx BAM Interface
+ */
+#define UARTDM_TX_BAM_ENABLE_BMSK 0x4
+#define UARTDM_RX_BAM_ENABLE_BMSK 0x8
+
+/* Register offsets for UART Core v13 */
+
+/* write only register */
+#define UARTDM_CSR_ADDR    0x8
+
+/* write only register */
+#define UARTDM_TF_ADDR   0x70
+#define UARTDM_TF2_ADDR  0x74
+#define UARTDM_TF3_ADDR  0x78
+#define UARTDM_TF4_ADDR  0x7c
+
+/* write only register */
+#define UARTDM_CR_ADDR 0x10
+/* write only register */
+#define UARTDM_IMR_ADDR 0x14
+#define UARTDM_IRDA_ADDR 0x38
+
+/* Read Only register */
+#define UARTDM_SR_ADDR 0x8
+
+/* Read Only register */
+#define UARTDM_RF_ADDR   0x70
+#define UARTDM_RF2_ADDR  0x74
+#define UARTDM_RF3_ADDR  0x78
+#define UARTDM_RF4_ADDR  0x7c
+
+/* Read Only register */
+#define UARTDM_MISR_ADDR 0x10
+
+/* Read Only register */
+#define UARTDM_ISR_ADDR 0x14
+#define UARTDM_RX_TOTAL_SNAP_ADDR 0x38
+
+#endif /* MSM_SERIAL_HS_HWREG_H */
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 3e459b0..4b78e02 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -428,6 +428,9 @@ static void dwc3_event_buffers_cleanup(struct dwc3 *dwc)
 {
 	struct dwc3_event_buffer	*evt;
 
+	if (!dwc->ev_buf)
+		return;
+
 	evt = dwc->ev_buf;
 
 	evt->lpos = 0;
@@ -692,8 +695,6 @@ static void dwc3_core_exit(struct dwc3 *dwc)
 	phy_exit(dwc->usb2_generic_phy);
 	phy_exit(dwc->usb3_generic_phy);
 
-	usb_phy_set_suspend(dwc->usb2_phy, 1);
-	usb_phy_set_suspend(dwc->usb3_phy, 1);
 	phy_power_off(dwc->usb2_generic_phy);
 	phy_power_off(dwc->usb3_generic_phy);
 }
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index b042152..5af75fd 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -951,6 +951,7 @@ struct dwc3_scratchpad_array {
  * @last_fifo_depth: total TXFIFO depth of all enabled USB IN/INT endpoints
  * @imod_interval: set the interrupt moderation interval in 250ns
  *			increments or 0 to disable.
+ * @create_reg_debugfs: create debugfs entry to allow dwc3 register dump
  */
 struct dwc3 {
 	struct usb_ctrlrequest	*ctrl_req;
@@ -1145,6 +1146,7 @@ struct dwc3 {
 	void			*dwc_ipc_log_ctxt;
 	int			last_fifo_depth;
 	struct dwc3_gadget_events	dbg_gadget_events;
+	bool			create_reg_debugfs;
 };
 
 /* -------------------------------------------------------------------------- */
diff --git a/drivers/usb/dwc3/debugfs.c b/drivers/usb/dwc3/debugfs.c
index 260092c..aebad09 100644
--- a/drivers/usb/dwc3/debugfs.c
+++ b/drivers/usb/dwc3/debugfs.c
@@ -291,6 +291,11 @@ static int dwc3_mode_show(struct seq_file *s, void *unused)
 	unsigned long		flags;
 	u32			reg;
 
+	if (atomic_read(&dwc->in_lpm)) {
+		seq_puts(s, "USB device is powered off\n");
+		return 0;
+	}
+
 	spin_lock_irqsave(&dwc->lock, flags);
 	reg = dwc3_readl(dwc->regs, DWC3_GCTL);
 	spin_unlock_irqrestore(&dwc->lock, flags);
@@ -326,6 +331,11 @@ static ssize_t dwc3_mode_write(struct file *file,
 	u32			mode = 0;
 	char buf[32] = {};
 
+	if (atomic_read(&dwc->in_lpm)) {
+		dev_err(dwc->dev, "USB device is powered off\n");
+		return count;
+	}
+
 	if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
 		return -EFAULT;
 
@@ -360,6 +370,12 @@ static int dwc3_testmode_show(struct seq_file *s, void *unused)
 	unsigned long		flags;
 	u32			reg;
 
+
+	if (atomic_read(&dwc->in_lpm)) {
+		seq_puts(s, "USB device is powered off\n");
+		return 0;
+	}
+
 	spin_lock_irqsave(&dwc->lock, flags);
 	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
 	reg &= DWC3_DCTL_TSTCTRL_MASK;
@@ -406,6 +422,11 @@ static ssize_t dwc3_testmode_write(struct file *file,
 	u32			testmode = 0;
 	char			buf[32] = {};
 
+	if (atomic_read(&dwc->in_lpm)) {
+		seq_puts(s, "USB device is powered off\n");
+		return 0;
+	}
+
 	if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
 		return -EFAULT;
 
@@ -444,6 +465,11 @@ static int dwc3_link_state_show(struct seq_file *s, void *unused)
 	enum dwc3_link_state	state;
 	u32			reg;
 
+	if (atomic_read(&dwc->in_lpm)) {
+		seq_puts(s, "USB device is powered off\n");
+		return 0;
+	}
+
 	spin_lock_irqsave(&dwc->lock, flags);
 	reg = dwc3_readl(dwc->regs, DWC3_DSTS);
 	state = DWC3_DSTS_USBLNKST(reg);
@@ -513,6 +539,11 @@ static ssize_t dwc3_link_state_write(struct file *file,
 	enum dwc3_link_state	state = 0;
 	char			buf[32] = {};
 
+	if (atomic_read(&dwc->in_lpm)) {
+		seq_puts(s, "USB device is powered off\n");
+		return 0;
+	}
+
 	if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
 		return -EFAULT;
 
@@ -558,6 +589,11 @@ static int dwc3_tx_fifo_queue_show(struct seq_file *s, void *unused)
 	unsigned long		flags;
 	u32			val;
 
+	if (atomic_read(&dwc->in_lpm)) {
+		seq_puts(s, "USB device is powered off\n");
+		return 0;
+	}
+
 	spin_lock_irqsave(&dwc->lock, flags);
 	val = dwc3_core_fifo_space(dep, DWC3_TXFIFOQ);
 	seq_printf(s, "%u\n", val);
@@ -573,6 +609,11 @@ static int dwc3_rx_fifo_queue_show(struct seq_file *s, void *unused)
 	unsigned long		flags;
 	u32			val;
 
+	if (atomic_read(&dwc->in_lpm)) {
+		seq_puts(s, "USB device is powered off\n");
+		return 0;
+	}
+
 	spin_lock_irqsave(&dwc->lock, flags);
 	val = dwc3_core_fifo_space(dep, DWC3_RXFIFOQ);
 	seq_printf(s, "%u\n", val);
@@ -588,6 +629,11 @@ static int dwc3_tx_request_queue_show(struct seq_file *s, void *unused)
 	unsigned long		flags;
 	u32			val;
 
+	if (atomic_read(&dwc->in_lpm)) {
+		seq_puts(s, "USB device is powered off\n");
+		return 0;
+	}
+
 	spin_lock_irqsave(&dwc->lock, flags);
 	val = dwc3_core_fifo_space(dep, DWC3_TXREQQ);
 	seq_printf(s, "%u\n", val);
@@ -603,6 +649,11 @@ static int dwc3_rx_request_queue_show(struct seq_file *s, void *unused)
 	unsigned long		flags;
 	u32			val;
 
+	if (atomic_read(&dwc->in_lpm)) {
+		seq_puts(s, "USB device is powered off\n");
+		return 0;
+	}
+
 	spin_lock_irqsave(&dwc->lock, flags);
 	val = dwc3_core_fifo_space(dep, DWC3_RXREQQ);
 	seq_printf(s, "%u\n", val);
@@ -618,6 +669,11 @@ static int dwc3_rx_info_queue_show(struct seq_file *s, void *unused)
 	unsigned long		flags;
 	u32			val;
 
+	if (atomic_read(&dwc->in_lpm)) {
+		seq_puts(s, "USB device is powered off\n");
+		return 0;
+	}
+
 	spin_lock_irqsave(&dwc->lock, flags);
 	val = dwc3_core_fifo_space(dep, DWC3_RXINFOQ);
 	seq_printf(s, "%u\n", val);
@@ -633,6 +689,11 @@ static int dwc3_descriptor_fetch_queue_show(struct seq_file *s, void *unused)
 	unsigned long		flags;
 	u32			val;
 
+	if (atomic_read(&dwc->in_lpm)) {
+		seq_puts(s, "USB device is powered off\n");
+		return 0;
+	}
+
 	spin_lock_irqsave(&dwc->lock, flags);
 	val = dwc3_core_fifo_space(dep, DWC3_DESCFETCHQ);
 	seq_printf(s, "%u\n", val);
@@ -648,6 +709,11 @@ static int dwc3_event_queue_show(struct seq_file *s, void *unused)
 	unsigned long		flags;
 	u32			val;
 
+	if (atomic_read(&dwc->in_lpm)) {
+		seq_puts(s, "USB device is powered off\n");
+		return 0;
+	}
+
 	spin_lock_irqsave(&dwc->lock, flags);
 	val = dwc3_core_fifo_space(dep, DWC3_EVENTQ);
 	seq_printf(s, "%u\n", val);
@@ -1044,9 +1110,12 @@ int dwc3_debugfs_init(struct dwc3 *dwc)
 	dwc->regset->nregs = ARRAY_SIZE(dwc3_regs);
 	dwc->regset->base = dwc->regs - DWC3_GLOBALS_REGS_START;
 
-	file = debugfs_create_regset32("regdump", S_IRUGO, root, dwc->regset);
-	if (!file)
-		dev_dbg(dwc->dev, "Can't create debugfs regdump\n");
+	if (dwc->create_reg_debugfs) {
+		file = debugfs_create_regset32("regdump", 0444,
+						root, dwc->regset);
+		if (!file)
+			dev_dbg(dwc->dev, "Can't create debugfs regdump\n");
+	}
 
 	if (IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE)) {
 		file = debugfs_create_file("mode", S_IRUGO | S_IWUSR, root,
diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c
index d316821..ccdf543 100644
--- a/drivers/usb/dwc3/dwc3-msm.c
+++ b/drivers/usb/dwc3/dwc3-msm.c
@@ -1867,6 +1867,9 @@ static void dwc3_msm_notify_event(struct dwc3 *dwc, unsigned int event)
 		dev_dbg(mdwc->dev, "DWC3_GSI_EVT_BUF_SETUP\n");
 		for (i = 0; i < mdwc->num_gsi_event_buffers; i++) {
 			evt = mdwc->gsi_ev_buff[i];
+			if (!evt)
+				break;
+
 			dev_dbg(mdwc->dev, "Evt buf %p dma %08llx length %d\n",
 				evt->buf, (unsigned long long) evt->dma,
 				evt->length);
@@ -1892,6 +1895,9 @@ static void dwc3_msm_notify_event(struct dwc3 *dwc, unsigned int event)
 		break;
 	case DWC3_GSI_EVT_BUF_CLEANUP:
 		dev_dbg(mdwc->dev, "DWC3_GSI_EVT_BUF_CLEANUP\n");
+		if (!mdwc->gsi_ev_buff)
+			break;
+
 		for (i = 0; i < mdwc->num_gsi_event_buffers; i++) {
 			evt = mdwc->gsi_ev_buff[i];
 			evt->lpos = 0;
@@ -1911,6 +1917,9 @@ static void dwc3_msm_notify_event(struct dwc3 *dwc, unsigned int event)
 		break;
 	case DWC3_GSI_EVT_BUF_FREE:
 		dev_dbg(mdwc->dev, "DWC3_GSI_EVT_BUF_FREE\n");
+		if (!mdwc->gsi_ev_buff)
+			break;
+
 		for (i = 0; i < mdwc->num_gsi_event_buffers; i++) {
 			evt = mdwc->gsi_ev_buff[i];
 			if (evt)
@@ -3145,7 +3154,7 @@ static int dwc3_msm_probe(struct platform_device *pdev)
 	ret = dwc3_msm_get_clk_gdsc(mdwc);
 	if (ret) {
 		dev_err(&pdev->dev, "error getting clock or gdsc.\n");
-		return ret;
+		goto err;
 	}
 
 	mdwc->id_state = DWC3_ID_FLOAT;
@@ -3441,24 +3450,20 @@ static int dwc3_msm_probe(struct platform_device *pdev)
 	return 0;
 
 put_dwc3:
-	platform_device_put(mdwc->dwc3);
 	if (mdwc->bus_perf_client)
 		msm_bus_scale_unregister_client(mdwc->bus_perf_client);
+
 uninit_iommu:
 	if (mdwc->iommu_map) {
 		arm_iommu_detach_device(mdwc->dev);
 		arm_iommu_release_mapping(mdwc->iommu_map);
 	}
+	of_platform_depopulate(&pdev->dev);
 err:
+	destroy_workqueue(mdwc->dwc3_wq);
 	return ret;
 }
 
-static int dwc3_msm_remove_children(struct device *dev, void *data)
-{
-	device_unregister(dev);
-	return 0;
-}
-
 static int dwc3_msm_remove(struct platform_device *pdev)
 {
 	struct dwc3_msm	*mdwc = platform_get_drvdata(pdev);
@@ -3495,8 +3500,7 @@ static int dwc3_msm_remove(struct platform_device *pdev)
 
 	if (mdwc->hs_phy)
 		mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
-	platform_device_put(mdwc->dwc3);
-	device_for_each_child(&pdev->dev, NULL, dwc3_msm_remove_children);
+	of_platform_depopulate(&pdev->dev);
 
 	dbg_event(0xFF, "Remov put", 0);
 	pm_runtime_disable(mdwc->dev);
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 89de903..14c02031 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -307,7 +307,7 @@ static int xhci_plat_probe(struct platform_device *pdev)
 
 	hcd_to_bus(xhci->shared_hcd)->skip_resume = true;
 
-	if (device_property_read_bool(sysdev, "usb3-lpm-capable"))
+	if (device_property_read_bool(&pdev->dev, "usb3-lpm-capable"))
 		xhci->quirks |= XHCI_LPM_SUPPORT;
 
 	if (device_property_read_bool(&pdev->dev, "quirk-broken-port-ped"))
diff --git a/include/linux/coresight.h b/include/linux/coresight.h
index a8003cc7..32c3d42 100644
--- a/include/linux/coresight.h
+++ b/include/linux/coresight.h
@@ -267,7 +267,7 @@ static inline int coresight_timeout(void __iomem *addr, u32 offset,
 static inline void coresight_abort(void) {}
 #endif
 
-#ifdef CONFIG_OF
+#if defined(CONFIG_OF) && defined(CONFIG_CORESIGHT)
 extern struct coresight_platform_data *of_get_coresight_platform_data(
 				struct device *dev, struct device_node *node);
 extern struct coresight_cti_data *of_get_coresight_cti_data(
@@ -275,7 +275,7 @@ extern struct coresight_cti_data *of_get_coresight_cti_data(
 #else
 static inline struct coresight_platform_data *of_get_coresight_platform_data(
 	struct device *dev, struct device_node *node) { return NULL; }
-static inlint struct coresight_cti_data *of_get_coresight_cti_data(
+static inline struct coresight_cti_data *of_get_coresight_cti_data(
 		struct device *dev, struct device_node *node) { return NULL; }
 #endif
 
diff --git a/include/linux/platform_data/msm_serial_hs.h b/include/linux/platform_data/msm_serial_hs.h
new file mode 100644
index 0000000..72c76e5
--- /dev/null
+++ b/include/linux/platform_data/msm_serial_hs.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2008 Google, Inc.
+ * Copyright (C) 2010-2014, The Linux Foundation. All rights reserved.
+ * Author: Nick Pelly <npelly@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __ASM_ARCH_MSM_SERIAL_HS_H
+#define __ASM_ARCH_MSM_SERIAL_HS_H
+
+#include <linux/serial_core.h>
+
+/**
+ * struct msm_serial_hs_platform_data - platform device data
+ *					for msm hsuart device
+ * @wakeup_irq : IRQ line to be configured as Wakeup source.
+ * @inject_rx_on_wakeup : Set 1 if specific character to be inserted on wakeup
+ * @rx_to_inject : Character to be inserted on wakeup
+ * @gpio_config : Configure gpios that are used for uart communication
+ * @userid : User-defined number to be used to enumerate device as tty<userid>
+ * @uart_tx_gpio: GPIO number for UART Tx Line.
+ * @uart_rx_gpio: GPIO number for UART Rx Line.
+ * @uart_cts_gpio: GPIO number for UART CTS Line.
+ * @uart_rfr_gpio: GPIO number for UART RFR Line.
+ * @bam_tx_ep_pipe_index : BAM TX Endpoint Pipe Index for HSUART
+ * @bam_rx_ep_pipe_index : BAM RX Endpoint Pipe Index for HSUART
+ * @no_suspend_delay : Flag used to make system go to suspend
+ * immediately or not
+ * @obs: Flag to enable out of band sleep feature support
+ */
+struct msm_serial_hs_platform_data {
+	int wakeup_irq;  /* wakeup irq */
+	bool inject_rx_on_wakeup;
+	u8 rx_to_inject;
+	int (*gpio_config)(int);
+	int userid;
+
+	int uart_tx_gpio;
+	int uart_rx_gpio;
+	int uart_cts_gpio;
+	int uart_rfr_gpio;
+	unsigned int bam_tx_ep_pipe_index;
+	unsigned int bam_rx_ep_pipe_index;
+	bool no_suspend_delay;
+	bool obs;
+};
+
+/* return true when tx is empty */
+unsigned int msm_hs_tx_empty(struct uart_port *uport);
+int msm_hs_request_clock_off(struct uart_port *uport);
+int msm_hs_request_clock_on(struct uart_port *uport);
+struct uart_port *msm_hs_get_uart_port(int port_index);
+void msm_hs_set_mctrl(struct uart_port *uport,
+				    unsigned int mctrl);
+#endif
diff --git a/include/linux/usb_bam.h b/include/linux/usb_bam.h
index 62fd4e4..1b0ca4a 100644
--- a/include/linux/usb_bam.h
+++ b/include/linux/usb_bam.h
@@ -198,7 +198,7 @@ struct usb_bam_pipe_connect {
 };
 
 /**
- * struct msm_usb_bam_platform_data: pipe connection information
+ * struct msm_usb_bam_data: pipe connection information
  * between USB/HSIC BAM and another BAM. USB/HSIC BAM can be
  * either src BAM or dst BAM
  * @usb_bam_num_pipes: max number of pipes to use.
@@ -218,7 +218,7 @@ struct usb_bam_pipe_connect {
  *		can work at in bam2bam mode when connected to SS host.
  * @enable_hsusb_bam_on_boot: Enable HSUSB BAM (non-NDP) on bootup itself
  */
-struct msm_usb_bam_platform_data {
+struct msm_usb_bam_data {
 	u8 max_connections;
 	int usb_bam_num_pipes;
 	phys_addr_t usb_bam_fifo_baseaddr;
diff --git a/include/trace/events/thermal.h b/include/trace/events/thermal.h
index c0475a2..94ee287 100644
--- a/include/trace/events/thermal.h
+++ b/include/trace/events/thermal.h
@@ -20,6 +20,29 @@ TRACE_DEFINE_ENUM(THERMAL_TRIP_ACTIVE);
 			 { THERMAL_TRIP_PASSIVE,  "PASSIVE"},	\
 			 { THERMAL_TRIP_ACTIVE,   "ACTIVE"})
 
+TRACE_EVENT(thermal_query_temp,
+
+	TP_PROTO(struct thermal_zone_device *tz, int temp),
+
+	TP_ARGS(tz, temp),
+
+	TP_STRUCT__entry(
+		__string(thermal_zone, tz->type)
+		__field(int, id)
+		__field(int, temp)
+	),
+
+	TP_fast_assign(
+		__assign_str(thermal_zone, tz->type);
+		__entry->id = tz->id;
+		__entry->temp = temp;
+	),
+
+	TP_printk("thermal_zone=%s id=%d temp=%d",
+		__get_str(thermal_zone), __entry->id,
+		__entry->temp)
+);
+
 TRACE_EVENT(thermal_temperature,
 
 	TP_PROTO(struct thermal_zone_device *tz),
diff --git a/include/uapi/drm/msm_drm_pp.h b/include/uapi/drm/msm_drm_pp.h
index 5f70a57..0765527 100644
--- a/include/uapi/drm/msm_drm_pp.h
+++ b/include/uapi/drm/msm_drm_pp.h
@@ -174,6 +174,17 @@ struct drm_msm_igc_lut {
 	__u32 strength;
 };
 
+#define HIST_V_SIZE 256
+/**
+ * struct drm_msm_hist - histogram feature structure
+ * @flags: for customizing operations
+ * @data: histogram data
+ */
+struct drm_msm_hist {
+	__u64 flags;
+	__u32 data[HIST_V_SIZE];
+};
+
 #define AD4_LUT_GRP0_SIZE 33
 #define AD4_LUT_GRP1_SIZE 32
 /*
diff --git a/include/uapi/media/cam_isp.h b/include/uapi/media/cam_isp.h
index 9253bc7..05c9283 100644
--- a/include/uapi/media/cam_isp.h
+++ b/include/uapi/media/cam_isp.h
@@ -73,6 +73,7 @@
 #define CAM_ISP_PACKET_META_DMI_COMMON          6
 #define CAM_ISP_PACKET_META_CLOCK               7
 #define CAM_ISP_PACKET_META_CSID                8
+#define CAM_ISP_PACKET_META_DUAL_CONFIG         9
 #define CAM_ISP_PACKET_META_GENERIC_BLOB        10
 #define CAM_ISP_PACKET_META_MAX                 11
 
@@ -261,4 +262,60 @@ struct cam_isp_resource_hfr_config {
 	struct cam_isp_port_hfr_config io_hfr_config[1];
 };
 
+/**
+ * struct cam_isp_dual_split_params - dual isp split parameters
+ *
+ * @split_point:                Split point information x, where (0 < x < width)
+ *                              left ISP's input ends at x + righ padding and
+ *                              Right ISP's input starts at x - left padding
+ * @right_padding:              Padding added past the split point for left
+ *                              ISP's input
+ * @left_padding:               Padding added before split point for right
+ *                              ISP's input
+ * @reserved:                   Reserved field for alignment
+ *
+ */
+struct cam_isp_dual_split_params {
+	uint32_t                       split_point;
+	uint32_t                       right_padding;
+	uint32_t                       left_padding;
+	uint32_t                       reserved;
+};
+
+/**
+ * struct cam_isp_dual_stripe_config - stripe config per bus client
+ *
+ * @offset:                     Start horizontal offset relative to
+ *                              output buffer
+ *                              In UBWC mode, this value indicates the H_INIT
+ *                              value in pixel
+ * @width:                      Width of the stripe in bytes
+ * @tileconfig                  Ubwc meta tile config. Contains the partial
+ *                              tile info
+ * @port_id:                    port id of ISP output
+ *
+ */
+struct cam_isp_dual_stripe_config {
+	uint32_t                       offset;
+	uint32_t                       width;
+	uint32_t                       tileconfig;
+	uint32_t                       port_id;
+};
+
+/**
+ * struct cam_isp_dual_config - dual isp configuration
+ *
+ * @num_ports                   Number of isp output ports
+ * @reserved                    Reserved field for alignment
+ * @split_params:               Input split parameters
+ * @stripes:                    Stripe information
+ *
+ */
+struct cam_isp_dual_config {
+	uint32_t                           num_ports;
+	uint32_t                           reserved;
+	struct cam_isp_dual_split_params   split_params;
+	struct cam_isp_dual_stripe_config  stripes[1];
+};
+
 #endif /* __UAPI_CAM_ISP_H__ */
diff --git a/include/uapi/media/msm_vidc.h b/include/uapi/media/msm_vidc.h
index 4fe325d..63fd555 100644
--- a/include/uapi/media/msm_vidc.h
+++ b/include/uapi/media/msm_vidc.h
@@ -6,6 +6,7 @@
 #define MSM_VIDC_HAL_INTERLACE_COLOR_FORMAT_NV12	0x2
 #define MSM_VIDC_HAL_INTERLACE_COLOR_FORMAT_NV12_UBWC	0x8002
 #define MSM_VIDC_4x_1 0x1
+#define MSM_VIDC_EXTRADATA_FRAME_QP_ADV 0x1
 
 struct msm_vidc_extradata_header {
 	unsigned int size;
@@ -137,6 +138,10 @@ struct msm_vidc_stream_userdata_payload {
 
 struct msm_vidc_frame_qp_payload {
 	unsigned int frame_qp;
+	unsigned int qp_sum;
+	unsigned int skip_qp_sum;
+	unsigned int skip_num_blocks;
+	unsigned int total_num_blocks;
 };
 
 struct msm_vidc_frame_bits_info_payload {
diff --git a/init/Kconfig b/init/Kconfig
index af000c7..9782dfc 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1293,6 +1293,16 @@
 
 	  If unsure, say N here.
 
+config SCHED_CORE_ROTATE
+	bool "Scheduler core rotation"
+	depends on SMP
+	help
+	  This option enables the core rotation functionality in
+	  the scheduler. Scheduler with core rotation aims to utilize
+	  CPUs evenly.
+
+	  If unsure, say N here.
+
 config CHECKPOINT_RESTORE
 	bool "Checkpoint/restore support" if EXPERT
 	select PROC_CHILDREN
diff --git a/kernel/locking/spinlock_debug.c b/kernel/locking/spinlock_debug.c
index 10e6d8b..e99d860 100644
--- a/kernel/locking/spinlock_debug.c
+++ b/kernel/locking/spinlock_debug.c
@@ -110,14 +110,38 @@ static inline void debug_spin_unlock(raw_spinlock_t *lock)
 	lock->owner_cpu = -1;
 }
 
-/*
- * We are now relying on the NMI watchdog to detect lockup instead of doing
- * the detection here with an unfair lock which can cause problem of its own.
- */
+static void __spin_lock_debug(raw_spinlock_t *lock)
+{
+	u64 i;
+	u64 loops = loops_per_jiffy * HZ;
+
+	for (i = 0; i < loops; i++) {
+		if (arch_spin_trylock(&lock->raw_lock))
+			return;
+		__delay(1);
+	}
+	/* lockup suspected: */
+	spin_bug(lock, "lockup suspected");
+#ifdef CONFIG_SMP
+	trigger_all_cpu_backtrace();
+#endif
+
+	/*
+	 * The trylock above was causing a livelock.  Give the lower level arch
+	 * specific lock code a chance to acquire the lock. We have already
+	 * printed a warning/backtrace at this point. The non-debug arch
+	 * specific code might actually succeed in acquiring the lock.  If it is
+	 * not successful, the end-result is the same - there is no forward
+	 * progress.
+	 */
+	arch_spin_lock(&lock->raw_lock);
+}
+
 void do_raw_spin_lock(raw_spinlock_t *lock)
 {
 	debug_spin_lock_before(lock);
-	arch_spin_lock(&lock->raw_lock);
+	if (unlikely(!arch_spin_trylock(&lock->raw_lock)))
+		__spin_lock_debug(lock);
 	debug_spin_lock_after(lock);
 }
 
@@ -155,6 +179,32 @@ static void rwlock_bug(rwlock_t *lock, const char *msg)
 
 #define RWLOCK_BUG_ON(cond, lock, msg) if (unlikely(cond)) rwlock_bug(lock, msg)
 
+#if 0		/* __write_lock_debug() can lock up - maybe this can too? */
+static void __read_lock_debug(rwlock_t *lock)
+{
+	u64 i;
+	u64 loops = loops_per_jiffy * HZ;
+	int print_once = 1;
+
+	for (;;) {
+		for (i = 0; i < loops; i++) {
+			if (arch_read_trylock(&lock->raw_lock))
+				return;
+			__delay(1);
+		}
+		/* lockup suspected: */
+		if (print_once) {
+			print_once = 0;
+			printk(KERN_EMERG "BUG: read-lock lockup on CPU#%d, "
+					"%s/%d, %p\n",
+				raw_smp_processor_id(), current->comm,
+				current->pid, lock);
+			dump_stack();
+		}
+	}
+}
+#endif
+
 void do_raw_read_lock(rwlock_t *lock)
 {
 	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
@@ -204,6 +254,32 @@ static inline void debug_write_unlock(rwlock_t *lock)
 	lock->owner_cpu = -1;
 }
 
+#if 0		/* This can cause lockups */
+static void __write_lock_debug(rwlock_t *lock)
+{
+	u64 i;
+	u64 loops = loops_per_jiffy * HZ;
+	int print_once = 1;
+
+	for (;;) {
+		for (i = 0; i < loops; i++) {
+			if (arch_write_trylock(&lock->raw_lock))
+				return;
+			__delay(1);
+		}
+		/* lockup suspected: */
+		if (print_once) {
+			print_once = 0;
+			printk(KERN_EMERG "BUG: write-lock lockup on CPU#%d, "
+					"%s/%d, %p\n",
+				raw_smp_processor_id(), current->comm,
+				current->pid, lock);
+			dump_stack();
+		}
+	}
+}
+#endif
+
 void do_raw_write_lock(rwlock_t *lock)
 {
 	debug_write_lock_before(lock);
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 08fd4be..d43ad69 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -9624,3 +9624,50 @@ void sched_exit(struct task_struct *p)
 #endif /* CONFIG_SCHED_WALT */
 
 __read_mostly bool sched_predl = 1;
+
+#ifdef CONFIG_SCHED_CORE_ROTATE
+int
+find_first_cpu_bit(struct task_struct *p, const cpumask_t *search_cpus,
+		   struct sched_group *sg_target, bool *avoid_prev_cpu,
+		   bool *do_rotate, struct find_first_cpu_bit_env *env)
+{
+	int i = -1;
+	unsigned long mcc;
+	int cpu = smp_processor_id();
+
+	mcc = cpu_rq(cpu)->rd->max_cpu_capacity.val;
+
+	/* do rotation only for big CPUs. */
+	*do_rotate = (cpumask_first(search_cpus) < nr_cpu_ids &&
+		     capacity_orig_of(cpumask_first(search_cpus)) == mcc);
+
+	if (*do_rotate) {
+		if (time_before_eq(jiffies, *env->avoid_prev_cpu_last +
+				   env->interval))
+			return *env->rotate_cpu_start;
+
+		spin_lock(env->rotate_lock);
+		if (time_after(jiffies, *env->avoid_prev_cpu_last +
+					env->interval)) {
+			cpumask_t tmpmask;
+
+			*env->avoid_prev_cpu_last = jiffies;
+			*avoid_prev_cpu = true;
+
+			cpumask_copy(&tmpmask, sched_group_cpus(sg_target));
+			cpumask_andnot(&tmpmask, &tmpmask, cpu_isolated_mask);
+
+			i = cpumask_next(*env->rotate_cpu_start, &tmpmask);
+			if (i >= nr_cpu_ids)
+				i = cpumask_first(&tmpmask) - 1;
+			/* Change start CPU every interval. */
+			*env->rotate_cpu_start = i;
+		} else {
+			i = *env->rotate_cpu_start;
+		}
+		spin_unlock(env->rotate_lock);
+	}
+
+	return i;
+}
+#endif
diff --git a/kernel/sched/core_ctl.c b/kernel/sched/core_ctl.c
index e56af41..772d97b 100644
--- a/kernel/sched/core_ctl.c
+++ b/kernel/sched/core_ctl.c
@@ -19,6 +19,7 @@
 #include <linux/kthread.h>
 #include <linux/sched.h>
 #include <linux/sched/rt.h>
+#include <linux/syscore_ops.h>
 
 #include <trace/events/sched.h>
 
@@ -35,6 +36,10 @@ struct cluster_data {
 	unsigned int active_cpus;
 	unsigned int num_cpus;
 	unsigned int nr_isolated_cpus;
+#ifdef CONFIG_SCHED_CORE_ROTATE
+	unsigned long set_max;
+	unsigned long set_cur;
+#endif
 	cpumask_t cpu_mask;
 	unsigned int need_cpus;
 	unsigned int task_thres;
@@ -76,6 +81,7 @@ static void wake_up_core_ctl_thread(struct cluster_data *state);
 static bool initialized;
 
 static unsigned int get_active_cpu_count(const struct cluster_data *cluster);
+static void cpuset_next(struct cluster_data *cluster);
 
 /* ========================= sysfs interface =========================== */
 
@@ -88,6 +94,7 @@ static ssize_t store_min_cpus(struct cluster_data *state,
 		return -EINVAL;
 
 	state->min_cpus = min(val, state->max_cpus);
+	cpuset_next(state);
 	wake_up_core_ctl_thread(state);
 
 	return count;
@@ -109,6 +116,7 @@ static ssize_t store_max_cpus(struct cluster_data *state,
 	val = min(val, state->num_cpus);
 	state->max_cpus = val;
 	state->min_cpus = min(state->min_cpus, state->max_cpus);
+	cpuset_next(state);
 	wake_up_core_ctl_thread(state);
 
 	return count;
@@ -702,6 +710,67 @@ static void move_cpu_lru(struct cpu_data *cpu_data)
 	spin_unlock_irqrestore(&state_lock, flags);
 }
 
+#ifdef CONFIG_SCHED_CORE_ROTATE
+static void cpuset_next(struct cluster_data *cluster)
+{
+	int cpus_needed = cluster->num_cpus - cluster->min_cpus;
+
+	cluster->set_cur++;
+	cluster->set_cur = min(cluster->set_cur, cluster->set_max);
+
+	/*
+	 * This loop generates bit sets from 0 to pow(num_cpus, 2) - 1.
+	 * We start loop from set_cur to set_cur - 1 and break when weight of
+	 * set_cur equals to cpus_needed.
+	 */
+
+	while (1) {
+		if (bitmap_weight(&cluster->set_cur, BITS_PER_LONG) ==
+		    cpus_needed) {
+			break;
+		}
+		cluster->set_cur++;
+		cluster->set_cur = min(cluster->set_cur, cluster->set_max);
+		if (cluster->set_cur == cluster->set_max)
+			/* roll over */
+			cluster->set_cur = 0;
+	};
+
+	pr_debug("first_cpu=%d cpus_needed=%d set_cur=0x%lx\n",
+		 cluster->first_cpu, cpus_needed, cluster->set_cur);
+}
+
+static bool should_we_isolate(int cpu, struct cluster_data *cluster)
+{
+	/* cpu should be part of cluster */
+	return !!(cluster->set_cur & (1 << (cpu - cluster->first_cpu)));
+}
+
+static void core_ctl_resume(void)
+{
+	unsigned int i = 0;
+	struct cluster_data *cluster;
+
+	/* move to next isolation cpu set */
+	for_each_cluster(cluster, i)
+		cpuset_next(cluster);
+}
+
+static struct syscore_ops core_ctl_syscore_ops = {
+	.resume	= core_ctl_resume,
+};
+
+#else
+
+static void cpuset_next(struct cluster_data *cluster) { }
+
+static bool should_we_isolate(int cpu, struct cluster_data *cluster)
+{
+	return true;
+}
+
+#endif
+
 static void try_to_isolate(struct cluster_data *cluster, unsigned int need)
 {
 	struct cpu_data *c, *tmp;
@@ -726,6 +795,9 @@ static void try_to_isolate(struct cluster_data *cluster, unsigned int need)
 		if (c->is_busy)
 			continue;
 
+		if (!should_we_isolate(c->cpu, cluster))
+			continue;
+
 		spin_unlock_irqrestore(&state_lock, flags);
 
 		pr_debug("Trying to isolate CPU%u\n", c->cpu);
@@ -1029,6 +1101,11 @@ static int cluster_init(const struct cpumask *mask)
 	cluster->offline_delay_ms = 100;
 	cluster->task_thres = UINT_MAX;
 	cluster->nrrun = cluster->num_cpus;
+#ifdef CONFIG_SCHED_CORE_ROTATE
+	cluster->set_max = cluster->num_cpus * cluster->num_cpus;
+	/* by default mark all cpus as eligible */
+	cluster->set_cur = cluster->set_max - 1;
+#endif
 	cluster->enable = true;
 	INIT_LIST_HEAD(&cluster->lru);
 	spin_lock_init(&cluster->pending_lock);
@@ -1065,6 +1142,10 @@ static int __init core_ctl_init(void)
 	if (should_skip(cpu_possible_mask))
 		return 0;
 
+#ifdef CONFIG_SCHED_CORE_ROTATE
+	register_syscore_ops(&core_ctl_syscore_ops);
+#endif
+
 	cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
 			"core_ctl/isolation:online",
 			core_ctl_isolation_online_cpu, NULL);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index d61c570..7b459ca 100755
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5034,6 +5034,19 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 DEFINE_PER_CPU(cpumask_var_t, load_balance_mask);
 DEFINE_PER_CPU(cpumask_var_t, select_idle_mask);
 
+#ifdef CONFIG_SCHED_CORE_ROTATE
+static int rotate_cpu_start;
+static DEFINE_SPINLOCK(rotate_lock);
+static unsigned long avoid_prev_cpu_last;
+
+static struct find_first_cpu_bit_env first_cpu_bit_env = {
+	.avoid_prev_cpu_last = &avoid_prev_cpu_last,
+	.rotate_cpu_start = &rotate_cpu_start,
+	.interval = HZ,
+	.rotate_lock = &rotate_lock,
+};
+#endif
+
 #ifdef CONFIG_NO_HZ_COMMON
 /*
  * per rq 'load' arrray crap; XXX kill this.
@@ -6883,7 +6896,7 @@ static int energy_aware_wake_cpu(struct task_struct *p, int target, int sync)
 	int target_cpu, targeted_cpus = 0;
 	unsigned long task_util_boosted = 0, curr_util = 0;
 	long new_util, new_util_cum;
-	int i;
+	int i = -1;
 	int ediff = -1;
 	int cpu = smp_processor_id();
 	int min_util_cpu = -1;
@@ -6902,8 +6915,17 @@ static int energy_aware_wake_cpu(struct task_struct *p, int target, int sync)
 	enum sched_boost_policy placement_boost = task_sched_boost(p) ?
 				sched_boost_policy() : SCHED_BOOST_NONE;
 	struct related_thread_group *grp;
+	cpumask_t search_cpus;
+	int prev_cpu = task_cpu(p);
+#ifdef CONFIG_SCHED_CORE_ROTATE
+	bool do_rotate = false;
+	bool avoid_prev_cpu = false;
+#else
+#define do_rotate false
+#define avoid_prev_cpu false
+#endif
 
-	sd = rcu_dereference(per_cpu(sd_ea, task_cpu(p)));
+	sd = rcu_dereference(per_cpu(sd_ea, prev_cpu));
 
 	if (!sd)
 		return target;
@@ -6922,7 +6944,7 @@ static int energy_aware_wake_cpu(struct task_struct *p, int target, int sync)
 		rtg_target = &grp->preferred_cluster->cpus;
 
 	if (sync && bias_to_waker_cpu(p, cpu, rtg_target)) {
-		trace_sched_task_util_bias_to_waker(p, task_cpu(p),
+		trace_sched_task_util_bias_to_waker(p, prev_cpu,
 					task_util(p), cpu, cpu, 0, need_idle);
 		return cpu;
 	}
@@ -6985,14 +7007,30 @@ static int energy_aware_wake_cpu(struct task_struct *p, int target, int sync)
 
 		target_cpu = -1;
 
+		cpumask_copy(&search_cpus, tsk_cpus_allowed(p));
+		cpumask_and(&search_cpus, &search_cpus,
+			    sched_group_cpus(sg_target));
+
+#ifdef CONFIG_SCHED_CORE_ROTATE
+		i = find_first_cpu_bit(p, &search_cpus, sg_target,
+				       &avoid_prev_cpu, &do_rotate,
+				       &first_cpu_bit_env);
+
+retry:
+#endif
 		/* Find cpu with sufficient capacity */
-		for_each_cpu_and(i, tsk_cpus_allowed(p), sched_group_cpus(sg_target)) {
+		while ((i = cpumask_next(i, &search_cpus)) < nr_cpu_ids) {
+			cpumask_clear_cpu(i, &search_cpus);
+
 			if (cpu_isolated(i))
 				continue;
 
 			if (isolated_candidate == -1)
 				isolated_candidate = i;
 
+			if (avoid_prev_cpu && i == prev_cpu)
+				continue;
+
 			if (is_reserved(i))
 				continue;
 
@@ -7061,9 +7099,9 @@ static int energy_aware_wake_cpu(struct task_struct *p, int target, int sync)
 						    new_util ||
 						    (target_cpu_util ==
 						     new_util &&
-						     (i == task_cpu(p) ||
+						     (i == prev_cpu ||
 						      (target_cpu !=
-						       task_cpu(p) &&
+						       prev_cpu &&
 						       target_cpu_new_util_cum >
 						       new_util_cum))))) {
 						min_idle_idx_cpu = i;
@@ -7075,6 +7113,9 @@ static int energy_aware_wake_cpu(struct task_struct *p, int target, int sync)
 					}
 				} else if (cpu_rq(i)->nr_running) {
 					target_cpu = i;
+#ifdef CONFIG_SCHED_CORE_ROTATE
+					do_rotate = false;
+#endif
 					break;
 				}
 			} else if (!need_idle) {
@@ -7114,6 +7155,19 @@ static int energy_aware_wake_cpu(struct task_struct *p, int target, int sync)
 			}
 		}
 
+#ifdef CONFIG_SCHED_CORE_ROTATE
+		if (do_rotate) {
+			/*
+			 * We started iteration somewhere in the middle of
+			 * cpumask.  Iterate once again from bit 0 to the
+			 * previous starting point bit.
+			 */
+			do_rotate = false;
+			i = -1;
+			goto retry;
+		}
+#endif
+
 		if (target_cpu == -1 ||
 		    (target_cpu != min_util_cpu && !safe_to_pack &&
 		     !is_packing_eligible(p, task_util_boosted, sg_target,
@@ -7148,7 +7202,8 @@ static int energy_aware_wake_cpu(struct task_struct *p, int target, int sync)
 		}
 	}
 
-	if (target_cpu != task_cpu(p) && !cpu_isolated(task_cpu(p))) {
+	if (target_cpu != task_cpu(p) && !avoid_prev_cpu &&
+	    !cpu_isolated(task_cpu(p))) {
 		struct energy_env eenv = {
 			.util_delta	= task_util(p),
 			.src_cpu	= task_cpu(p),
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 35382df..7feba85 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1711,6 +1711,19 @@ static struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu)
 	return NULL;
 }
 
+#ifdef CONFIG_SCHED_CORE_ROTATE
+static int rotate_cpu_start;
+static DEFINE_SPINLOCK(rotate_lock);
+static unsigned long avoid_prev_cpu_last;
+
+static struct find_first_cpu_bit_env first_cpu_bit_env = {
+	.avoid_prev_cpu_last = &avoid_prev_cpu_last,
+	.rotate_cpu_start = &rotate_cpu_start,
+	.interval = HZ,
+	.rotate_lock = &rotate_lock,
+};
+#endif
+
 static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
 
 static int find_lowest_rq(struct task_struct *task)
@@ -1719,7 +1732,7 @@ static int find_lowest_rq(struct task_struct *task)
 	struct sched_group *sg, *sg_target;
 	struct cpumask *lowest_mask = this_cpu_cpumask_var_ptr(local_cpu_mask);
 	int this_cpu = smp_processor_id();
-	int cpu, best_cpu;
+	int cpu = -1, best_cpu;
 	struct cpumask search_cpu, backup_search_cpu;
 	unsigned long cpu_capacity;
 	unsigned long best_capacity;
@@ -1730,6 +1743,13 @@ static int find_lowest_rq(struct task_struct *task)
 	int best_cpu_idle_idx = INT_MAX;
 	int cpu_idle_idx = -1;
 	bool placement_boost;
+#ifdef CONFIG_SCHED_CORE_ROTATE
+	bool do_rotate = false;
+	bool avoid_prev_cpu = false;
+#else
+#define do_rotate false
+#define avoid_prev_cpu false
+#endif
 
 	/* Make sure the mask is initialized first */
 	if (unlikely(!lowest_mask))
@@ -1761,6 +1781,10 @@ static int find_lowest_rq(struct task_struct *task)
 
 		sg = sd->groups;
 		do {
+			if (!cpumask_intersects(lowest_mask,
+						sched_group_cpus(sg)))
+				continue;
+
 			cpu = group_first_cpu(sg);
 			cpu_capacity = capacity_orig_of(cpu);
 
@@ -1778,20 +1802,37 @@ static int find_lowest_rq(struct task_struct *task)
 		} while (sg = sg->next, sg != sd->groups);
 		rcu_read_unlock();
 
-		cpumask_and(&search_cpu, lowest_mask,
-			    sched_group_cpus(sg_target));
-		cpumask_copy(&backup_search_cpu, lowest_mask);
-		cpumask_andnot(&backup_search_cpu, &backup_search_cpu,
-			       &search_cpu);
+		if (sg_target) {
+			cpumask_and(&search_cpu, lowest_mask,
+				    sched_group_cpus(sg_target));
+			cpumask_copy(&backup_search_cpu, lowest_mask);
+			cpumask_andnot(&backup_search_cpu, &backup_search_cpu,
+				       &search_cpu);
+
+#ifdef CONFIG_SCHED_CORE_ROTATE
+			cpu = find_first_cpu_bit(task, &search_cpu, sg_target,
+						 &avoid_prev_cpu, &do_rotate,
+						 &first_cpu_bit_env);
+#endif
+		} else {
+			cpumask_copy(&search_cpu, lowest_mask);
+			cpumask_clear(&backup_search_cpu);
+			cpu = -1;
+		}
 
 retry:
-		for_each_cpu(cpu, &search_cpu) {
+		while ((cpu = cpumask_next(cpu, &search_cpu)) < nr_cpu_ids) {
+			cpumask_clear_cpu(cpu, &search_cpu);
+
 			/*
 			 * Don't use capcity_curr_of() since it will
 			 * double count rt task load.
 			 */
 			util = cpu_util(cpu);
 
+			if (avoid_prev_cpu && cpu == task_cpu(task))
+				continue;
+
 			if (__cpu_overutilized(cpu, util + tutil))
 				continue;
 
@@ -1838,6 +1879,19 @@ static int find_lowest_rq(struct task_struct *task)
 			best_cpu = cpu;
 		}
 
+#ifdef CONFIG_SCHED_CORE_ROTATE
+		if (do_rotate) {
+			/*
+			 * We started iteration somewhere in the middle of
+			 * cpumask.  Iterate once again from bit 0 to the
+			 * previous starting point bit.
+			 */
+			do_rotate = false;
+			cpu = -1;
+			goto retry;
+		}
+#endif
+
 		if (best_cpu != -1) {
 			return best_cpu;
 		} else if (!cpumask_empty(&backup_search_cpu)) {
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index a5b1377..7ac731d 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2841,3 +2841,17 @@ static inline bool energy_aware(void)
 {
 	return sched_feat(ENERGY_AWARE);
 }
+
+#ifdef CONFIG_SCHED_CORE_ROTATE
+struct find_first_cpu_bit_env {
+	unsigned long *avoid_prev_cpu_last;
+	int *rotate_cpu_start;
+	int interval;
+	spinlock_t *rotate_lock;
+};
+
+int
+find_first_cpu_bit(struct task_struct *p, const cpumask_t *search_cpus,
+		   struct sched_group *sg_target, bool *avoid_prev_cpu,
+		   bool *do_rotate, struct find_first_cpu_bit_env *env);
+#endif
diff --git a/kernel/sched/sched_avg.c b/kernel/sched/sched_avg.c
index 166c643..46480a7 100644
--- a/kernel/sched/sched_avg.c
+++ b/kernel/sched/sched_avg.c
@@ -123,7 +123,7 @@ void sched_get_nr_running_avg(int *avg, int *iowait_avg, int *big_avg,
 EXPORT_SYMBOL(sched_get_nr_running_avg);
 
 #define BUSY_NR_RUN		3
-#define BUSY_LOAD_FACTOR	2
+#define BUSY_LOAD_FACTOR	10
 static inline void update_last_busy_time(int cpu, bool dequeue,
 				unsigned long prev_nr_run, u64 curr_time)
 {
diff --git a/scripts/Makefile.dtbo b/scripts/Makefile.dtbo
index db4a0f4..b298f4a 100644
--- a/scripts/Makefile.dtbo
+++ b/scripts/Makefile.dtbo
@@ -10,7 +10,7 @@
 ifneq ($(DTC_OVERLAY_TEST_EXT),)
 DTC_OVERLAY_TEST = $(DTC_OVERLAY_TEST_EXT)
 quiet_cmd_dtbo_verify	= VERIFY  $@
-cmd_dtbo_verify = $(DTC_OVERLAY_TEST) $(addprefix $(obj)/,$($(@F)-base)) $@ $(dot-target).dtb
+cmd_dtbo_verify = $(DTC_OVERLAY_TEST) $(addprefix $(obj)/,$($(@F)-base)) $@ $(dot-target).tmp
 else
 cmd_dtbo_verify = true
 endif