Merge "mhi: core: add support to dump debug registers"
diff --git a/Documentation/devicetree/bindings/arm/msm/mdm-modem.txt b/Documentation/devicetree/bindings/arm/msm/mdm-modem.txt
index f620892..6d99458 100644
--- a/Documentation/devicetree/bindings/arm/msm/mdm-modem.txt
+++ b/Documentation/devicetree/bindings/arm/msm/mdm-modem.txt
@@ -124,6 +124,8 @@
 		management boot options. The default value is 203 milliseconds.
 - qcom,esoc-skip-restart-for-mdm-crash: Boolean. If set, the esoc framework would skip the warm
 		reboot phase during the modem crash.
+- qcom,esoc-spmi-soft-reset: Boolean. If set, the esoc framework will use QPNP
+		APIs to reset the external modem chip instead of toggling GPIOs.
 
 Example:
 	mdm0: qcom,mdm0 {
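For reference, a boolean DT property such as qcom,esoc-spmi-soft-reset carries
no value; a consumer only tests for its presence. A minimal sketch of that
check, assuming a made-up helper name (the actual esoc driver code is not part
of this hunk):

	#include <linux/of.h>

	/* Illustrative: decide whether to reset the modem over SPMI. */
	static bool mdm_use_spmi_soft_reset(const struct device_node *node)
	{
		/* True when "qcom,esoc-spmi-soft-reset;" is present in DT. */
		return of_property_read_bool(node, "qcom,esoc-spmi-soft-reset");
	}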
diff --git a/Documentation/devicetree/bindings/sound/wcd_codec.txt b/Documentation/devicetree/bindings/sound/wcd_codec.txt
index cf8fac6..bb5f74f 100644
--- a/Documentation/devicetree/bindings/sound/wcd_codec.txt
+++ b/Documentation/devicetree/bindings/sound/wcd_codec.txt
@@ -47,7 +47,11 @@
  - qcom,va-vdd-micb-voltage: mic bias supply's voltage level min and max in mV
  - qcom,va-vdd-micb-current: mic bias supply's max current in mA
  - qcom,va-dmic-sample-rate: Sample rate defined for DMIC connected to VA macro
+
+Optional properties:
  - qcom,va-clk-mux-select: VA macro MCLK MUX selection
+ - qcom,va-island-mode-muxsel: VA macro island mode MUX selection
+		This property is required if qcom,va-clk-mux-select is provided.
 
 Example:
 
@@ -62,6 +66,7 @@
 		qcom,va-vdd-micb-current = <11200>;
 		qcom,va-dmic-sample-rate = <4800000>;
 		qcom,va-clk-mux-select = <1>;
+		qcom,va-island-mode-muxsel = <0x033A0000>;
 	};
 };
 
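Both VA macro mux properties are plain u32 cells, with the dependency spelled
out above. A hedged sketch of the parse, assuming invented helper and argument
names (not taken from the codec driver):

	#include <linux/of.h>

	/* Illustrative: island muxsel is mandatory once clk-mux-select is set. */
	static int va_macro_parse_muxsel(struct device_node *np,
					 u32 *clk_mux, u32 *island_muxsel)
	{
		if (of_property_read_u32(np, "qcom,va-clk-mux-select", clk_mux))
			return 0;	/* both properties absent: nothing to do */

		return of_property_read_u32(np, "qcom,va-island-mode-muxsel",
					    island_muxsel);
	}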
diff --git a/Documentation/devicetree/bindings/spi/qcom,spi-geni-qcom.txt b/Documentation/devicetree/bindings/spi/qcom,spi-geni-qcom.txt
new file mode 100644
index 0000000..073ba81
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/qcom,spi-geni-qcom.txt
@@ -0,0 +1,78 @@
+GENI-based Qualcomm Technologies, Inc. Universal Peripheral version 3 (QUPv3)
+					Serial Peripheral Interface (SPI)
+
+The QUPv3 core is a GENI-based AHB slave that provides a common data path
+(an output FIFO and an input FIFO) for the serial peripheral interface (SPI)
+mini-core.
+
+SPI in master mode supports up to 50 MHz, up to four chip selects, a
+programmable data path from 4 bits to 32 bits, and numerous protocol variants.
+
+Required properties:
+- compatible:	  Should contain "qcom,spi-geni"
+- reg:		  Should contain base register location and length
+- interrupts:	  Interrupt number used by this controller
+- clocks:	  Should contain the serial engine core clock and the master
+		  and slave AHB clocks.
+- clock-names:	  Should be "se-clk" for the core clock, and "m-ahb" and
+		  "s-ahb" for the AHB clocks, matching the example below.
+- pinctrl-names:  Should contain "default" and "sleep" for the pin
+		  configurations used while active and while idle.
+- pinctrl-0/-1:	  phandles to the default and sleep pin configurations,
+		  respectively.
+- #address-cells: Number of cells required to define a chip select
+		  address on the SPI bus. Should be set to 1.
+- #size-cells:	  Should be zero.
+- spi-max-frequency: Specifies the maximum SPI clock frequency in Hz, as
+		     defined in
+		     Documentation/devicetree/bindings/spi/spi-bus.txt.
+- qcom,wrapper-core: Wrapper QUPv3 core containing this SPI controller.
+
+Optional properties:
+- qcom,rt:	Specifies if the framework worker thread for this
+		controller device should have "real-time" priority.
+- qcom,disable-autosuspend: Specifies to disable runtime PM auto suspend.
+
+SPI slave nodes must be children of the SPI master node and can contain
+the following properties.
+
+Required properties:
+- compatible:     Should contain:
+                  "qcom,spi-msm-codec-slave" for external codec control
+
+- reg:            Chip select address of device.
+
+- spi-max-frequency: Maximum SPI clocking speed of device in Hz.
+
+Optional properties:
+- spi-cpha:       Empty property indicating device requires
+                  shifted clock phase (CPHA) mode.
+
+- qcom,slv-ctrl:  Set this flag to configure the QUPv3 as an SPI slave
+                  controller.
+
+Other optional properties are described in
+Documentation/devicetree/bindings/spi/spi-bus.txt.
+
+Example:
+
+	qupv3_spi10: spi@a84000 {
+		compatible = "qcom,spi-geni";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg = <0xa84000 0x4000>;
+		reg-names = "se_phys";
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP0_S0_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qup_1_spi_2_active>;
+		pinctrl-1 = <&qup_1_spi_2_sleep>;
+		interrupts = <GIC_SPI 354 0>;
+		spi-max-frequency = <19200000>;
+		qcom,wrapper-core = <&qupv3_0>;
+
+		dev@0 {
+			compatible = "dummy,slave";
+			reg = <0>;
+			spi-max-frequency = <9600000>;
+		};
+	};
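For completeness, a child node such as dev@0 above binds to a struct
spi_device whose chip select and max_speed_hz come from "reg" and
"spi-max-frequency". An illustrative client probe under those assumptions
(the function name and transfer bytes are made up):

	#include <linux/kernel.h>
	#include <linux/spi/spi.h>

	static int dummy_slave_probe(struct spi_device *spi)
	{
		u8 tx = 0xA5, rx = 0;

		/* Honor the slave node's 9.6 MHz limit from the example. */
		spi->max_speed_hz = min(spi->max_speed_hz, 9600000U);
		spi->bits_per_word = 8;	/* data path supports 4..32 bits */
		if (spi_setup(spi))
			return -EIO;

		/* One full-duplex byte through the QUPv3 FIFO data path. */
		return spi_write_then_read(spi, &tx, 1, &rx, 1);
	}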
diff --git a/Makefile b/Makefile
index 0d6b4d8..0f70c8c 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 4
 PATCHLEVEL = 19
-SUBLEVEL = 9
+SUBLEVEL = 10
 EXTRAVERSION =
 NAME = "People's Front"
 
diff --git a/arch/arm/boot/dts/am3517-evm.dts b/arch/arm/boot/dts/am3517-evm.dts
index 1d158cf..c45aef8 100644
--- a/arch/arm/boot/dts/am3517-evm.dts
+++ b/arch/arm/boot/dts/am3517-evm.dts
@@ -227,7 +227,7 @@
 	vmmc-supply = <&vmmc_fixed>;
 	bus-width = <4>;
 	wp-gpios = <&gpio4 30 GPIO_ACTIVE_HIGH>; /* gpio_126 */
-	cd-gpios = <&gpio4 31 GPIO_ACTIVE_HIGH>; /* gpio_127 */
+	cd-gpios = <&gpio4 31 GPIO_ACTIVE_LOW>; /* gpio_127 */
 };
 
 &mmc3 {
diff --git a/arch/arm/boot/dts/am3517-som.dtsi b/arch/arm/boot/dts/am3517-som.dtsi
index dae6e45..b1c988e 100644
--- a/arch/arm/boot/dts/am3517-som.dtsi
+++ b/arch/arm/boot/dts/am3517-som.dtsi
@@ -163,7 +163,7 @@
 		compatible = "ti,wl1271";
 		reg = <2>;
 		interrupt-parent = <&gpio6>;
-		interrupts = <10 IRQ_TYPE_LEVEL_HIGH>; /* gpio_170 */
+		interrupts = <10 IRQ_TYPE_EDGE_RISING>; /* gpio_170 */
 		ref-clock-frequency = <26000000>;
 		tcxo-clock-frequency = <26000000>;
 	};
diff --git a/arch/arm/boot/dts/logicpd-som-lv.dtsi b/arch/arm/boot/dts/logicpd-som-lv.dtsi
index ac34333..98b682a 100644
--- a/arch/arm/boot/dts/logicpd-som-lv.dtsi
+++ b/arch/arm/boot/dts/logicpd-som-lv.dtsi
@@ -129,7 +129,7 @@
 };
 
 &mmc3 {
-	interrupts-extended = <&intc 94 &omap3_pmx_core2 0x46>;
+	interrupts-extended = <&intc 94 &omap3_pmx_core 0x136>;
 	pinctrl-0 = <&mmc3_pins &wl127x_gpio>;
 	pinctrl-names = "default";
 	vmmc-supply = <&wl12xx_vmmc>;
diff --git a/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts b/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts
index 9d5d53f..c39cf2c 100644
--- a/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts
+++ b/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts
@@ -35,7 +35,7 @@
  * jumpering combinations for the long run.
  */
 &mmc3 {
-	interrupts-extended = <&intc 94 &omap3_pmx_core2 0x46>;
+	interrupts-extended = <&intc 94 &omap3_pmx_core 0x136>;
 	pinctrl-0 = <&mmc3_pins &mmc3_core2_pins>;
 	pinctrl-names = "default";
 	vmmc-supply = <&wl12xx_vmmc>;
diff --git a/arch/arm/boot/dts/sama5d2.dtsi b/arch/arm/boot/dts/sama5d2.dtsi
index 61f68e5..b405992 100644
--- a/arch/arm/boot/dts/sama5d2.dtsi
+++ b/arch/arm/boot/dts/sama5d2.dtsi
@@ -308,7 +308,7 @@
 				  0x1 0x0 0x60000000 0x10000000
 				  0x2 0x0 0x70000000 0x10000000
 				  0x3 0x0 0x80000000 0x10000000>;
-			clocks = <&mck>;
+			clocks = <&h32ck>;
 			status = "disabled";
 
 			nand_controller: nand-controller {
diff --git a/arch/arm/mach-omap1/board-ams-delta.c b/arch/arm/mach-omap1/board-ams-delta.c
index dd28d26..d10d883 100644
--- a/arch/arm/mach-omap1/board-ams-delta.c
+++ b/arch/arm/mach-omap1/board-ams-delta.c
@@ -726,6 +726,9 @@ static void modem_pm(struct uart_port *port, unsigned int state, unsigned old)
 	struct modem_private_data *priv = port->private_data;
 	int ret;
 
+	if (!priv)
+		return;
+
 	if (IS_ERR(priv->regulator))
 		return;
 
diff --git a/arch/arm/mach-omap2/prm44xx.c b/arch/arm/mach-omap2/prm44xx.c
index 7b95729..38a1be6 100644
--- a/arch/arm/mach-omap2/prm44xx.c
+++ b/arch/arm/mach-omap2/prm44xx.c
@@ -351,7 +351,7 @@ static void omap44xx_prm_reconfigure_io_chain(void)
  * to occur, WAKEUPENABLE bits must be set in the pad mux registers, and
  * omap44xx_prm_reconfigure_io_chain() must be called.  No return value.
  */
-static void __init omap44xx_prm_enable_io_wakeup(void)
+static void omap44xx_prm_enable_io_wakeup(void)
 {
 	s32 inst = omap4_prmst_get_prm_dev_inst();
 
diff --git a/arch/arm64/boot/dts/qcom/kona-camera-sensor-cdp.dtsi b/arch/arm64/boot/dts/qcom/kona-camera-sensor-cdp.dtsi
index 5f33420..95cabad 100644
--- a/arch/arm64/boot/dts/qcom/kona-camera-sensor-cdp.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-camera-sensor-cdp.dtsi
@@ -21,14 +21,14 @@
 		switch-source = <&pm8150l_switch2>;
 		status = "ok";
 	};
-};
 
-&cam_cci0 {
 	qcom,cam-res-mgr {
 		compatible = "qcom,cam-res-mgr";
 		status = "ok";
 	};
+};
 
+&cam_cci0 {
 	actuator_rear: qcom,actuator0 {
 		cell-index = <0>;
 		compatible = "qcom,actuator";
@@ -115,43 +115,6 @@
 		clock-rates = <24000000>;
 	};
 
-	eeprom_front: qcom,eeprom2 {
-		cell-index = <2>;
-		compatible = "qcom,eeprom";
-		cam_vio-supply = <&pm8009_l7>;
-		cam_vana-supply = <&pm8009_l6>;
-		cam_vdig-supply = <&pm8009_l3>;
-		cam_clk-supply = <&titan_top_gdsc>;
-		cam_vaf-supply = <&pm8150a_l7>;
-		regulator-names = "cam_vio", "cam_vana", "cam_vdig",
-			"cam_clk", "cam_vaf";
-		rgltr-cntrl-support;
-		rgltr-min-voltage = <0 2800000 1056000 0 2856000>;
-		rgltr-max-voltage = <0 3000000 1056000 0 3104000>;
-		rgltr-load-current = <0 80000 1200000 0 0>;
-		gpio-no-mux = <0>;
-		pinctrl-names = "cam_default", "cam_suspend";
-		pinctrl-0 = <&cam_sensor_mclk2_active
-				 &cam_sensor_active_front>;
-		pinctrl-1 = <&cam_sensor_mclk2_suspend
-				 &cam_sensor_suspend_front>;
-		gpios = <&tlmm 96 0>,
-			<&tlmm 78 0>;
-		gpio-reset = <1>;
-		gpio-req-tbl-num = <0 1>;
-		gpio-req-tbl-flags = <1 0>;
-		gpio-req-tbl-label = "CAMIF_MCLK2",
-					"CAM_RESET2";
-		sensor-position = <1>;
-		sensor-mode = <0>;
-		cci-master = <1>;
-		status = "ok";
-		clocks = <&clock_camcc CAM_CC_MCLK2_CLK>;
-		clock-names = "cam_clk";
-		clock-cntl-level = "turbo";
-		clock-rates = <24000000>;
-	};
-
 	qcom,cam-sensor0 {
 		cell-index = <0>;
 		compatible = "qcom,cam-sensor";
@@ -236,6 +199,45 @@
 		clock-cntl-level = "turbo";
 		clock-rates = <24000000>;
 	};
+};
+
+&cam_cci1 {
+	eeprom_front: qcom,eeprom2 {
+		cell-index = <2>;
+		compatible = "qcom,eeprom";
+		cam_vio-supply = <&pm8009_l7>;
+		cam_vana-supply = <&pm8009_l6>;
+		cam_vdig-supply = <&pm8009_l3>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		cam_vaf-supply = <&pm8150a_l7>;
+		regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+			"cam_clk", "cam_vaf";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <0 2800000 1056000 0 2856000>;
+		rgltr-max-voltage = <0 3000000 1056000 0 3104000>;
+		rgltr-load-current = <0 80000 1200000 0 0>;
+		gpio-no-mux = <0>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk2_active
+				 &cam_sensor_active_front>;
+		pinctrl-1 = <&cam_sensor_mclk2_suspend
+				 &cam_sensor_suspend_front>;
+		gpios = <&tlmm 96 0>,
+			<&tlmm 78 0>;
+		gpio-reset = <1>;
+		gpio-req-tbl-num = <0 1>;
+		gpio-req-tbl-flags = <1 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK2",
+					"CAM_RESET2";
+		sensor-position = <1>;
+		sensor-mode = <0>;
+		cci-master = <0>;
+		status = "ok";
+		clocks = <&clock_camcc CAM_CC_MCLK2_CLK>;
+		clock-names = "cam_clk";
+		clock-cntl-level = "turbo";
+		clock-rates = <24000000>;
+	};
 
 	qcom,cam-sensor2 {
 		cell-index = <2>;
@@ -271,7 +273,7 @@
 		gpio-req-tbl-label = "CAMIF_MCLK2",
 					"CAM_RESET2";
 		sensor-mode = <0>;
-		cci-master = <1>;
+		cci-master = <0>;
 		status = "ok";
 		clocks = <&clock_camcc CAM_CC_MCLK2_CLK>;
 		clock-names = "cam_clk";
@@ -279,3 +281,4 @@
 		clock-rates = <24000000>;
 	};
 };
+
diff --git a/arch/arm64/boot/dts/qcom/kona-camera-sensor-mtp.dtsi b/arch/arm64/boot/dts/qcom/kona-camera-sensor-mtp.dtsi
index 5f33420..95cabad 100644
--- a/arch/arm64/boot/dts/qcom/kona-camera-sensor-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-camera-sensor-mtp.dtsi
@@ -21,14 +21,14 @@
 		switch-source = <&pm8150l_switch2>;
 		status = "ok";
 	};
-};
 
-&cam_cci0 {
 	qcom,cam-res-mgr {
 		compatible = "qcom,cam-res-mgr";
 		status = "ok";
 	};
+};
 
+&cam_cci0 {
 	actuator_rear: qcom,actuator0 {
 		cell-index = <0>;
 		compatible = "qcom,actuator";
@@ -115,43 +115,6 @@
 		clock-rates = <24000000>;
 	};
 
-	eeprom_front: qcom,eeprom2 {
-		cell-index = <2>;
-		compatible = "qcom,eeprom";
-		cam_vio-supply = <&pm8009_l7>;
-		cam_vana-supply = <&pm8009_l6>;
-		cam_vdig-supply = <&pm8009_l3>;
-		cam_clk-supply = <&titan_top_gdsc>;
-		cam_vaf-supply = <&pm8150a_l7>;
-		regulator-names = "cam_vio", "cam_vana", "cam_vdig",
-			"cam_clk", "cam_vaf";
-		rgltr-cntrl-support;
-		rgltr-min-voltage = <0 2800000 1056000 0 2856000>;
-		rgltr-max-voltage = <0 3000000 1056000 0 3104000>;
-		rgltr-load-current = <0 80000 1200000 0 0>;
-		gpio-no-mux = <0>;
-		pinctrl-names = "cam_default", "cam_suspend";
-		pinctrl-0 = <&cam_sensor_mclk2_active
-				 &cam_sensor_active_front>;
-		pinctrl-1 = <&cam_sensor_mclk2_suspend
-				 &cam_sensor_suspend_front>;
-		gpios = <&tlmm 96 0>,
-			<&tlmm 78 0>;
-		gpio-reset = <1>;
-		gpio-req-tbl-num = <0 1>;
-		gpio-req-tbl-flags = <1 0>;
-		gpio-req-tbl-label = "CAMIF_MCLK2",
-					"CAM_RESET2";
-		sensor-position = <1>;
-		sensor-mode = <0>;
-		cci-master = <1>;
-		status = "ok";
-		clocks = <&clock_camcc CAM_CC_MCLK2_CLK>;
-		clock-names = "cam_clk";
-		clock-cntl-level = "turbo";
-		clock-rates = <24000000>;
-	};
-
 	qcom,cam-sensor0 {
 		cell-index = <0>;
 		compatible = "qcom,cam-sensor";
@@ -236,6 +199,45 @@
 		clock-cntl-level = "turbo";
 		clock-rates = <24000000>;
 	};
+};
+
+&cam_cci1 {
+	eeprom_front: qcom,eeprom2 {
+		cell-index = <2>;
+		compatible = "qcom,eeprom";
+		cam_vio-supply = <&pm8009_l7>;
+		cam_vana-supply = <&pm8009_l6>;
+		cam_vdig-supply = <&pm8009_l3>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		cam_vaf-supply = <&pm8150a_l7>;
+		regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+			"cam_clk", "cam_vaf";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <0 2800000 1056000 0 2856000>;
+		rgltr-max-voltage = <0 3000000 1056000 0 3104000>;
+		rgltr-load-current = <0 80000 1200000 0 0>;
+		gpio-no-mux = <0>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk2_active
+				 &cam_sensor_active_front>;
+		pinctrl-1 = <&cam_sensor_mclk2_suspend
+				 &cam_sensor_suspend_front>;
+		gpios = <&tlmm 96 0>,
+			<&tlmm 78 0>;
+		gpio-reset = <1>;
+		gpio-req-tbl-num = <0 1>;
+		gpio-req-tbl-flags = <1 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK2",
+					"CAM_RESET2";
+		sensor-position = <1>;
+		sensor-mode = <0>;
+		cci-master = <0>;
+		status = "ok";
+		clocks = <&clock_camcc CAM_CC_MCLK2_CLK>;
+		clock-names = "cam_clk";
+		clock-cntl-level = "turbo";
+		clock-rates = <24000000>;
+	};
 
 	qcom,cam-sensor2 {
 		cell-index = <2>;
@@ -271,7 +273,7 @@
 		gpio-req-tbl-label = "CAMIF_MCLK2",
 					"CAM_RESET2";
 		sensor-mode = <0>;
-		cci-master = <1>;
+		cci-master = <0>;
 		status = "ok";
 		clocks = <&clock_camcc CAM_CC_MCLK2_CLK>;
 		clock-names = "cam_clk";
@@ -279,3 +281,4 @@
 		clock-rates = <24000000>;
 	};
 };
+
diff --git a/arch/arm64/boot/dts/qcom/kona-cdp.dtsi b/arch/arm64/boot/dts/qcom/kona-cdp.dtsi
index 8127c69..05b9be8 100644
--- a/arch/arm64/boot/dts/qcom/kona-cdp.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-cdp.dtsi
@@ -49,6 +49,13 @@
 		pinctrl-0 = <&bt_en_active>;
 		qca,bt-reset-gpio = <&tlmm 21 0>; /* BT_EN */
 		qca,bt-vdd-ldo-supply = <&pm8150_s6>;
+		qca,bt-vdd-dig-supply = <&pm8009_s2>;
+		qca,bt-vdd-rfa1-supply = <&pm8150_s5>;
+		qca,bt-vdd-rfa2-supply = <&pm8150a_s8>;
+
 		qca,bt-vdd-ldo-voltage-level = <950000 950000>;
+		qca,bt-vdd-dig-voltage-level = <950000 950000>;
+		qca,bt-vdd-rfa1-voltage-level = <1900000 1900000>;
+		qca,bt-vdd-rfa2-voltage-level = <1350000 1350000>;
 	};
 };
diff --git a/arch/arm64/boot/dts/qcom/kona-mtp.dtsi b/arch/arm64/boot/dts/qcom/kona-mtp.dtsi
index 71ad59c..a26983c 100644
--- a/arch/arm64/boot/dts/qcom/kona-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-mtp.dtsi
@@ -49,6 +49,13 @@
 		pinctrl-0 = <&bt_en_active>;
 		qca,bt-reset-gpio = <&tlmm 21 0>; /* BT_EN */
 		qca,bt-vdd-ldo-supply = <&pm8150_s6>;
+		qca,bt-vdd-dig-supply = <&pm8009_s2>;
+		qca,bt-vdd-rfa1-supply = <&pm8150_s5>;
+		qca,bt-vdd-rfa2-supply = <&pm8150a_s8>;
+
 		qca,bt-vdd-ldo-voltage-level = <950000 950000>;
+		qca,bt-vdd-dig-voltage-level = <950000 950000>;
+		qca,bt-vdd-rfa1-voltage-level = <1900000 1900000>;
+		qca,bt-vdd-rfa2-voltage-level = <1350000 1350000>;
 	};
 };
diff --git a/arch/arm64/boot/dts/qcom/kona-qupv3.dtsi b/arch/arm64/boot/dts/qcom/kona-qupv3.dtsi
index 7590e788..dde4d12 100644
--- a/arch/arm64/boot/dts/qcom/kona-qupv3.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-qupv3.dtsi
@@ -4,6 +4,7 @@
  */
 
 #include <dt-bindings/msm/msm-bus-ids.h>
+#include <dt-bindings/interrupt-controller/irq.h>
 
 &soc {
 	/* QUPv3_0  wrapper  instance : North QUP*/
@@ -35,9 +36,34 @@
 		pinctrl-1 = <&qupv3_se2_2uart_sleep>;
 		interrupts = <GIC_SPI 603 IRQ_TYPE_LEVEL_HIGH>;
 		qcom,wrapper-core = <&qupv3_0>;
+		qcom,change-sampling-rate;
 		status = "disabled";
 	};
 
+	/*
+	 * HS UART instances. HS UART use cases are supported only on
+	 * these instances.
+	 */
+	qupv3_se6_4uart: qcom,qup_uart@998000 {
+		compatible = "qcom,msm-geni-serial-hs";
+		reg = <0x998000 0x4000>;
+		reg-names = "se_phys";
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP0_S6_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se6_ctsrx>, <&qupv3_se6_rts>,
+							<&qupv3_se6_tx>;
+		pinctrl-1 = <&qupv3_se6_ctsrx>, <&qupv3_se6_rts>,
+							<&qupv3_se6_tx>;
+		interrupts-extended = <&pdc 607 IRQ_TYPE_LEVEL_HIGH>,
+					<&tlmm 19 0>;
+		status = "disabled";
+		qcom,wakeup-byte = <0xFD>;
+		qcom,wrapper-core = <&qupv3_0>;
+	};
+
 		/* I2C */
 	qupv3_se0_i2c: i2c@980000 {
 		compatible = "qcom,i2c-geni";
@@ -243,6 +269,7 @@
 		pinctrl-1 = <&qupv3_se12_2uart_sleep>;
 		interrupts = <GIC_SPI 357 IRQ_TYPE_LEVEL_HIGH>;
 		qcom,wrapper-core = <&qupv3_1>;
+		qcom,change-sampling-rate;
 		status = "disabled";
 	};
 
@@ -382,7 +409,51 @@
 		};
 	};
 
-		/* I2C */
+	/*
+	 * HS UART: Modem/Audio backup
+	 */
+	qupv3_se17_4uart: qcom,qup_uart@88c000 {
+		compatible = "qcom,msm-geni-serial-hs";
+		reg = <0x88c000 0x4000>;
+		reg-names = "se_phys";
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP2_S3_CLK>,
+				<&clock_gcc GCC_QUPV3_WRAP_2_M_AHB_CLK>,
+				<&clock_gcc GCC_QUPV3_WRAP_2_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se17_ctsrx>, <&qupv3_se17_rts>,
+							<&qupv3_se17_tx>;
+		pinctrl-1 = <&qupv3_se17_ctsrx>, <&qupv3_se17_rts>,
+							<&qupv3_se17_tx>;
+		interrupts-extended = <&pdc 585 IRQ_TYPE_LEVEL_HIGH>,
+					<&tlmm 55 0>;
+		status = "disabled";
+		qcom,wakeup-byte = <0xFD>;
+		qcom,wrapper-core = <&qupv3_2>;
+	};
+
+	/*
+	 * HS UART: 2-wire Modem
+	 */
+	qupv3_se18_2uart: qcom,qup_uart@890000 {
+		compatible = "qcom,msm-geni-serial-hs";
+		reg = <0x890000 0x4000>;
+		reg-names = "se_phys";
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP2_S4_CLK>,
+				<&clock_gcc GCC_QUPV3_WRAP_2_M_AHB_CLK>,
+				<&clock_gcc GCC_QUPV3_WRAP_2_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qupv3_se18_rx>, <&qupv3_se18_tx>;
+		pinctrl-1 = <&qupv3_se18_rx>, <&qupv3_se18_tx>;
+		interrupts-extended = <&pdc 586 IRQ_TYPE_LEVEL_HIGH>,
+						<&tlmm 59 0>;
+		status = "disabled";
+		qcom,wakeup-byte = <0xFD>;
+		qcom,wrapper-core = <&qupv3_2>;
+	};
+
+	/* I2C */
 	qupv3_se14_i2c: i2c@880000 {
 		compatible = "qcom,i2c-geni";
 		reg = <0x880000 0x4000>;
@@ -502,4 +573,5 @@
 		qcom,wrapper-core = <&qupv3_2>;
 		status = "disabled";
 	};
+
 };
diff --git a/arch/arm64/boot/dts/qcom/kona-regulators.dtsi b/arch/arm64/boot/dts/qcom/kona-regulators.dtsi
index e1622b4..f25be0a 100644
--- a/arch/arm64/boot/dts/qcom/kona-regulators.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-regulators.dtsi
@@ -77,9 +77,9 @@
 		S4A: pm8150_s4: regulator-pm8150-s4 {
 			regulator-name = "pm8150_s4";
 			qcom,set = <RPMH_REGULATOR_SET_ALL>;
-			regulator-min-microvolt = <1824000>;
-			regulator-max-microvolt = <1824000>;
-			qcom,init-voltage = <1824000>;
+			regulator-min-microvolt = <1800000>;
+			regulator-max-microvolt = <1920000>;
+			qcom,init-voltage = <1800000>;
 		};
 	};
 
@@ -136,9 +136,9 @@
 		L3A: pm8150_l3: regulator-pm8150-l3 {
 			regulator-name = "pm8150_l3";
 			qcom,set = <RPMH_REGULATOR_SET_ALL>;
-			regulator-min-microvolt = <480000>;
+			regulator-min-microvolt = <928000>;
 			regulator-max-microvolt = <932000>;
-			qcom,init-voltage = <480000>;
+			qcom,init-voltage = <928000>;
 			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
@@ -207,7 +207,7 @@
 			regulator-name = "pm8150_l7";
 			qcom,set = <RPMH_REGULATOR_SET_ALL>;
 			regulator-min-microvolt = <1704000>;
-			regulator-max-microvolt = <1900000>;
+			regulator-max-microvolt = <1800000>;
 			qcom,init-voltage = <1704000>;
 			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
@@ -243,7 +243,7 @@
 			regulator-name = "pm8150_l10";
 			qcom,set = <RPMH_REGULATOR_SET_ALL>;
 			regulator-min-microvolt = <1800000>;
-			regulator-max-microvolt = <3000000>;
+			regulator-max-microvolt = <2960000>;
 			qcom,init-voltage = <1800000>;
 			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
@@ -348,9 +348,9 @@
 		L16A: pm8150_l16: regulator-pm8150-l16 {
 			regulator-name = "pm8150_l16";
 			qcom,set = <RPMH_REGULATOR_SET_ALL>;
-			regulator-min-microvolt = <3304000>;
+			regulator-min-microvolt = <3024000>;
 			regulator-max-microvolt = <3304000>;
-			qcom,init-voltage = <3304000>;
+			qcom,init-voltage = <3024000>;
 			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
@@ -366,9 +366,9 @@
 		L17A: pm8150_l17: regulator-pm8150-l17 {
 			regulator-name = "pm8150_l17";
 			qcom,set = <RPMH_REGULATOR_SET_ALL>;
-			regulator-min-microvolt = <2856000>;
+			regulator-min-microvolt = <2496000>;
 			regulator-max-microvolt = <3008000>;
-			qcom,init-voltage = <2856000>;
+			qcom,init-voltage = <2496000>;
 			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
@@ -518,9 +518,9 @@
 		L2C: pm8150a_l2: regulator-pm8150a-l2 {
 			regulator-name = "pm8150a_l2";
 			qcom,set = <RPMH_REGULATOR_SET_ALL>;
-			regulator-min-microvolt = <1304000>;
+			regulator-min-microvolt = <1200000>;
 			regulator-max-microvolt = <1304000>;
-			qcom,init-voltage = <1304000>;
+			qcom,init-voltage = <1200000>;
 			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
 	};
@@ -537,7 +537,7 @@
 			regulator-name = "pm8150a_l3";
 			qcom,set = <RPMH_REGULATOR_SET_ALL>;
 			regulator-min-microvolt = <800000>;
-			regulator-max-microvolt = <1300000>;
+			regulator-max-microvolt = <1200000>;
 			qcom,init-voltage = <800000>;
 			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
@@ -555,7 +555,7 @@
 			regulator-name = "pm8150a_l4";
 			qcom,set = <RPMH_REGULATOR_SET_ALL>;
 			regulator-min-microvolt = <1800000>;
-			regulator-max-microvolt = <1808000>;
+			regulator-max-microvolt = <2800000>;
 			qcom,init-voltage = <1800000>;
 			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
@@ -573,7 +573,7 @@
 			regulator-name = "pm8150a_l5";
 			qcom,set = <RPMH_REGULATOR_SET_ALL>;
 			regulator-min-microvolt = <1800000>;
-			regulator-max-microvolt = <1808000>;
+			regulator-max-microvolt = <2800000>;
 			qcom,init-voltage = <1800000>;
 			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
@@ -724,7 +724,7 @@
 			regulator-name = "pm8009_s1";
 			qcom,set = <RPMH_REGULATOR_SET_ALL>;
 			regulator-min-microvolt = <1200000>;
-			regulator-max-microvolt = <1400000>;
+			regulator-max-microvolt = <1200000>;
 			qcom,init-voltage = <1200000>;
 		};
 	};
@@ -736,7 +736,7 @@
 			regulator-name = "pm8009_s2";
 			qcom,set = <RPMH_REGULATOR_SET_ALL>;
 			regulator-min-microvolt = <952000>;
-			regulator-max-microvolt = <2000000>;
+			regulator-max-microvolt = <952000>;
 			qcom,init-voltage = <952000>;
 		};
 	};
@@ -753,7 +753,7 @@
 			regulator-name = "pm8009_l1";
 			qcom,set = <RPMH_REGULATOR_SET_ALL>;
 			regulator-min-microvolt = <1104000>;
-			regulator-max-microvolt = <1328000>;
+			regulator-max-microvolt = <1104000>;
 			qcom,init-voltage = <1104000>;
 			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
@@ -771,7 +771,7 @@
 			regulator-name = "pm8009_l2";
 			qcom,set = <RPMH_REGULATOR_SET_ALL>;
 			regulator-min-microvolt = <1200000>;
-			regulator-max-microvolt = <1328000>;
+			regulator-max-microvolt = <1200000>;
 			qcom,init-voltage = <1200000>;
 			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
@@ -789,7 +789,7 @@
 			regulator-name = "pm8009_l3";
 			qcom,set = <RPMH_REGULATOR_SET_ALL>;
 			regulator-min-microvolt = <1056000>;
-			regulator-max-microvolt = <1328000>;
+			regulator-max-microvolt = <1056000>;
 			qcom,init-voltage = <1056000>;
 			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
 		};
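The min/max pairs above are hard constraints enforced by the regulator core:
a consumer request outside the window simply fails. An illustrative consumer
sketch, assuming a made-up supply name "vdd" and the tightened pm8150_l3
window:

	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/regulator/consumer.h>

	static int set_core_rail(struct device *dev)
	{
		struct regulator *vreg = devm_regulator_get(dev, "vdd");

		if (IS_ERR(vreg))
			return PTR_ERR(vreg);

		/* Must fall inside regulator-min/max-microvolt (928-932 mV). */
		if (regulator_set_voltage(vreg, 928000, 932000))
			return -EINVAL;

		return regulator_enable(vreg);
	}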
diff --git a/arch/arm64/boot/dts/qcom/kona-rumi.dtsi b/arch/arm64/boot/dts/qcom/kona-rumi.dtsi
index e649a7e..e93c034 100644
--- a/arch/arm64/boot/dts/qcom/kona-rumi.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-rumi.dtsi
@@ -85,10 +85,6 @@
 				     0x10060 0x3c
 				     0x0 0x4>;
 	};
-
-	usb_nop_phy: usb_nop_phy {
-		compatible = "usb-nop-xceiv";
-	};
 };
 
 &usb0 {
diff --git a/arch/arm64/boot/dts/qcom/kona-usb.dtsi b/arch/arm64/boot/dts/qcom/kona-usb.dtsi
index 673c95b..a453eba 100644
--- a/arch/arm64/boot/dts/qcom/kona-usb.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-usb.dtsi
@@ -4,6 +4,7 @@
  */
 
 #include <dt-bindings/clock/qcom,gcc-kona.h>
+#include <dt-bindings/phy/qcom,kona-qmp-usb3.h>
 
 &soc {
 	/* Primary USB port related controller */
@@ -59,6 +60,7 @@
 			compatible = "snps,dwc3";
 			reg = <0x0a600000 0xcd00>;
 			interrupts = <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>;
+			usb-phy = <&usb2_phy0>, <&usb_nop_phy>;
 			linux,sysdev_is_parent;
 			snps,disable-clk-gating;
 			snps,has-lpm-erratum;
@@ -67,7 +69,7 @@
 			snps,usb3-u1u2-disable;
 			usb-core-id = <0>;
 			tx-fifo-resize;
-			maximum-speed = "super-speed-plus";
+			maximum-speed = "high-speed";
 			dr_mode = "drd";
 		};
 
@@ -101,10 +103,222 @@
 		};
 	};
 
+	/* Primary USB port related High Speed PHY */
+	usb2_phy0: hsphy@88e3000 {
+		compatible = "qcom,usb-hsphy-snps-femto";
+		reg = <0x88e3000 0x110>;
+		reg-names = "hsusb_phy_base";
+
+		vdd-supply = <&pm8150_l5>;
+		vdda18-supply = <&pm8150_l12>;
+		vdda33-supply = <&pm8150_l2>;
+		qcom,vdd-voltage-level = <0 880000 880000>;
+
+		clocks = <&clock_rpmh RPMH_CXO_CLK>;
+		clock-names = "ref_clk_src";
+
+		resets = <&clock_gcc GCC_QUSB2PHY_PRIM_BCR>;
+		reset-names = "phy_reset";
+	};
+
+	/* Primary USB port related QMP USB DP Combo PHY */
+	usb_qmp_dp_phy: ssphy@88e8000 {
+		compatible = "qcom,usb-ssphy-qmp-dp-combo";
+		reg = <0x88e8000 0x3000>;
+		reg-names = "qmp_phy_base";
+
+		vdd-supply = <&pm8150_l18>;
+		qcom,vdd-voltage-level = <0 880000 880000>;
+		qcom,vdd-max-load-uA = <47000>;
+		core-supply = <&pm8150_l9>;
+		qcom,vbus-valid-override;
+		qcom,link-training-reset;
+		qcom,qmp-phy-init-seq =
+			/* <reg_offset, value, delay> */
+			<USB3_DP_QSERDES_COM_SSC_EN_CENTER 0x01 0
+			USB3_DP_QSERDES_COM_SSC_PER1 0x31 0
+			USB3_DP_QSERDES_COM_SSC_PER2 0x01 0
+			USB3_DP_QSERDES_COM_SSC_STEP_SIZE1_MODE0 0xDE 0
+			USB3_DP_QSERDES_COM_SSC_STEP_SIZE2_MODE0 0x07 0
+			USB3_DP_QSERDES_COM_SSC_STEP_SIZE1_MODE1 0xDE 0
+			USB3_DP_QSERDES_COM_SSC_STEP_SIZE2_MODE1 0x07 0
+			USB3_DP_QSERDES_COM_SYSCLK_BUF_ENABLE 0x0A 0
+			USB3_DP_QSERDES_COM_CMN_IPTRIM 0x20 0
+			USB3_DP_QSERDES_COM_CP_CTRL_MODE0 0x06 0
+			USB3_DP_QSERDES_COM_CP_CTRL_MODE1 0x06 0
+			USB3_DP_QSERDES_COM_PLL_RCTRL_MODE0 0x16 0
+			USB3_DP_QSERDES_COM_PLL_RCTRL_MODE1 0x16 0
+			USB3_DP_QSERDES_COM_PLL_CCTRL_MODE0 0x36 0
+			USB3_DP_QSERDES_COM_PLL_CCTRL_MODE1 0x36 0
+			USB3_DP_QSERDES_COM_SYSCLK_EN_SEL 0x1A 0
+			USB3_DP_QSERDES_COM_LOCK_CMP_EN 0x04 0
+			USB3_DP_QSERDES_COM_LOCK_CMP1_MODE0 0x14 0
+			USB3_DP_QSERDES_COM_LOCK_CMP2_MODE0 0x34 0
+			USB3_DP_QSERDES_COM_LOCK_CMP1_MODE1 0x34 0
+			USB3_DP_QSERDES_COM_LOCK_CMP2_MODE1 0x82 0
+			USB3_DP_QSERDES_COM_DEC_START_MODE0 0x82 0
+			USB3_DP_QSERDES_COM_DEC_START_MODE1 0x82 0
+			USB3_DP_QSERDES_COM_DIV_FRAC_START1_MODE0 0xAB 0
+			USB3_DP_QSERDES_COM_DIV_FRAC_START2_MODE0 0xEA 0
+			USB3_DP_QSERDES_COM_DIV_FRAC_START3_MODE0 0x02 0
+			USB3_DP_QSERDES_COM_DIV_FRAC_START1_MODE1 0xAB 0
+			USB3_DP_QSERDES_COM_DIV_FRAC_START2_MODE1 0xEA 0
+			USB3_DP_QSERDES_COM_DIV_FRAC_START3_MODE1 0x02 0
+			USB3_DP_QSERDES_COM_VCO_TUNE_MAP 0x02 0
+			USB3_DP_QSERDES_COM_VCO_TUNE1_MODE0 0x24 0
+			USB3_DP_QSERDES_COM_VCO_TUNE1_MODE1 0x24 0
+			USB3_DP_QSERDES_COM_VCO_TUNE2_MODE1 0x02 0
+			USB3_DP_QSERDES_COM_HSCLK_SEL 0x01 0
+			USB3_DP_QSERDES_COM_CORECLK_DIV_MODE1 0x08 0
+			USB3_DP_QSERDES_COM_BIN_VCOCAL_CMP_CODE1_MODE0 0xCA 0
+			USB3_DP_QSERDES_COM_BIN_VCOCAL_CMP_CODE2_MODE0 0x1E 0
+			USB3_DP_QSERDES_COM_BIN_VCOCAL_CMP_CODE1_MODE1 0xCA 0
+			USB3_DP_QSERDES_COM_BIN_VCOCAL_CMP_CODE2_MODE1 0x1E 0
+			USB3_DP_QSERDES_COM_BIN_VCOCAL_HSCLK_SEL 0x11 0
+			USB3_DP_QSERDES_TXA_RES_CODE_LANE_TX 0x00 0
+			USB3_DP_QSERDES_TXA_RES_CODE_LANE_RX 0x00 0
+			USB3_DP_QSERDES_TXA_RES_CODE_LANE_OFFSET_TX 0x16 0
+			USB3_DP_QSERDES_TXA_RES_CODE_LANE_OFFSET_RX 0x05 0
+			USB3_DP_QSERDES_TXA_LANE_MODE_1 0xD5 0
+			USB3_DP_QSERDES_TXA_RCV_DETECT_LVL_2 0x12 0
+			USB3_DP_QSERDES_TXA_PI_QEC_CTRL 0x20 0
+			USB3_DP_QSERDES_RXA_UCDR_SO_GAIN 0x05 0
+			USB3_DP_QSERDES_RXA_UCDR_FASTLOCK_FO_GAIN 0x2F 0
+			USB3_DP_QSERDES_RXA_UCDR_SO_SATURATION_AND_ENABLE 0x7F 0
+			USB3_DP_QSERDES_RXA_UCDR_FASTLOCK_COUNT_LOW 0xFF 0
+			USB3_DP_QSERDES_RXA_UCDR_FASTLOCK_COUNT_HIGH 0x0F 0
+			USB3_DP_QSERDES_RXA_UCDR_PI_CONTROLS 0x99 0
+			USB3_DP_QSERDES_RXA_UCDR_SB2_THRESH1 0x04 0
+			USB3_DP_QSERDES_RXA_UCDR_SB2_THRESH2 0x08 0
+			USB3_DP_QSERDES_RXA_UCDR_SB2_GAIN1 0x05 0
+			USB3_DP_QSERDES_RXA_UCDR_SB2_GAIN2 0x05 0
+			USB3_DP_QSERDES_RXA_VGA_CAL_CNTRL1 0x54 0
+			USB3_DP_QSERDES_RXA_VGA_CAL_CNTRL2 0x0E 0
+			USB3_DP_QSERDES_RXA_RX_EQU_ADAPTOR_CNTRL2 0x0F 0
+			USB3_DP_QSERDES_RXA_RX_EQU_ADAPTOR_CNTRL3 0x4A 0
+			USB3_DP_QSERDES_RXA_RX_EQU_ADAPTOR_CNTRL4 0x0A 0
+			USB3_DP_QSERDES_RXA_RX_IDAC_TSETTLE_LOW 0xC0 0
+			USB3_DP_QSERDES_RXA_RX_IDAC_TSETTLE_HIGH 0x00 0
+			USB3_DP_QSERDES_RXA_RX_EQ_OFFSET_ADAPTOR_CNTRL1 0x77 0
+			USB3_DP_QSERDES_RXA_SIGDET_CNTRL 0x04 0
+			USB3_DP_QSERDES_RXA_SIGDET_DEGLITCH_CNTRL 0x0E 0
+			USB3_DP_QSERDES_RXA_RX_MODE_00_LOW 0xBF 0
+			USB3_DP_QSERDES_RXA_RX_MODE_00_HIGH 0xBF 0
+			USB3_DP_QSERDES_RXA_RX_MODE_00_HIGH2 0x3F 0
+			USB3_DP_QSERDES_RXA_RX_MODE_00_HIGH3 0x7F 0
+			USB3_DP_QSERDES_RXA_RX_MODE_00_HIGH4 0x94 0
+			USB3_DP_QSERDES_RXA_RX_MODE_01_LOW 0xDC 0
+			USB3_DP_QSERDES_RXA_RX_MODE_01_HIGH 0xDC 0
+			USB3_DP_QSERDES_RXA_RX_MODE_01_HIGH2 0x5C 0
+			USB3_DP_QSERDES_RXA_RX_MODE_01_HIGH3 0x0B 0
+			USB3_DP_QSERDES_RXA_RX_MODE_01_HIGH4 0xB3 0
+			USB3_DP_QSERDES_RXA_DFE_EN_TIMER 0x04 0
+			USB3_DP_QSERDES_RXA_DFE_CTLE_POST_CAL_OFFSET 0x38 0
+			USB3_DP_QSERDES_RXA_AUX_DATA_TCOARSE_TFINE 0xA0 0
+			USB3_DP_QSERDES_RXA_DCC_CTRL1 0x0C 0
+			USB3_DP_QSERDES_RXA_GM_CAL 0x1F 0
+			USB3_DP_QSERDES_RXA_VTH_CODE 0x10 0
+			USB3_DP_QSERDES_TXB_RES_CODE_LANE_TX 0x00 0
+			USB3_DP_QSERDES_TXB_RES_CODE_LANE_RX 0x00 0
+			USB3_DP_QSERDES_TXB_RES_CODE_LANE_OFFSET_TX 0x16 0
+			USB3_DP_QSERDES_TXB_RES_CODE_LANE_OFFSET_RX 0x05 0
+			USB3_DP_QSERDES_TXB_LANE_MODE_1 0xD5 0
+			USB3_DP_QSERDES_TXB_RCV_DETECT_LVL_2 0x12 0
+			USB3_DP_QSERDES_TXB_PI_QEC_CTRL 0x20 0
+			USB3_DP_QSERDES_RXB_UCDR_SO_GAIN 0x05 0
+			USB3_DP_QSERDES_RXB_UCDR_FASTLOCK_FO_GAIN 0x2F 0
+			USB3_DP_QSERDES_RXB_UCDR_SO_SATURATION_AND_ENABLE 0x7F 0
+			USB3_DP_QSERDES_RXB_UCDR_FASTLOCK_COUNT_LOW 0xFF 0
+			USB3_DP_QSERDES_RXB_UCDR_FASTLOCK_COUNT_HIGH 0x0F 0
+			USB3_DP_QSERDES_RXB_UCDR_PI_CONTROLS 0x99 0
+			USB3_DP_QSERDES_RXB_UCDR_SB2_THRESH1 0x04 0
+			USB3_DP_QSERDES_RXB_UCDR_SB2_THRESH2 0x08 0
+			USB3_DP_QSERDES_RXB_UCDR_SB2_GAIN1 0x05 0
+			USB3_DP_QSERDES_RXB_UCDR_SB2_GAIN2 0x05 0
+			USB3_DP_QSERDES_RXB_VGA_CAL_CNTRL1 0x54 0
+			USB3_DP_QSERDES_RXB_VGA_CAL_CNTRL2 0x0E 0
+			USB3_DP_QSERDES_RXB_RX_EQU_ADAPTOR_CNTRL2 0x0F 0
+			USB3_DP_QSERDES_RXB_RX_EQU_ADAPTOR_CNTRL3 0x4A 0
+			USB3_DP_QSERDES_RXB_RX_EQU_ADAPTOR_CNTRL4 0x0A 0
+			USB3_DP_QSERDES_RXB_RX_IDAC_TSETTLE_LOW 0xC0 0
+			USB3_DP_QSERDES_RXB_RX_IDAC_TSETTLE_HIGH 0x00 0
+			USB3_DP_QSERDES_RXB_RX_EQ_OFFSET_ADAPTOR_CNTRL1 0x77 0
+			USB3_DP_QSERDES_RXB_SIGDET_CNTRL 0x04 0
+			USB3_DP_QSERDES_RXB_SIGDET_DEGLITCH_CNTRL 0x0E 0
+			USB3_DP_QSERDES_RXB_RX_MODE_00_LOW 0xBF 0
+			USB3_DP_QSERDES_RXB_RX_MODE_00_HIGH 0xBF 0
+			USB3_DP_QSERDES_RXB_RX_MODE_00_HIGH2 0x3F 0
+			USB3_DP_QSERDES_RXB_RX_MODE_00_HIGH3 0x7F 0
+			USB3_DP_QSERDES_RXB_RX_MODE_00_HIGH4 0x94 0
+			USB3_DP_QSERDES_RXB_RX_MODE_01_LOW 0xDC 0
+			USB3_DP_QSERDES_RXB_RX_MODE_01_HIGH 0xDC 0
+			USB3_DP_QSERDES_RXB_RX_MODE_01_HIGH2 0x5C 0
+			USB3_DP_QSERDES_RXB_RX_MODE_01_HIGH3 0x0B 0
+			USB3_DP_QSERDES_RXB_RX_MODE_01_HIGH4 0xB3 0
+			USB3_DP_QSERDES_RXB_DFE_EN_TIMER 0x04 0
+			USB3_DP_QSERDES_RXB_DFE_CTLE_POST_CAL_OFFSET 0x38 0
+			USB3_DP_QSERDES_RXB_AUX_DATA_TCOARSE_TFINE 0xA0 0
+			USB3_DP_QSERDES_RXB_DCC_CTRL1 0x0C 0
+			USB3_DP_QSERDES_RXB_GM_CAL 0x1F 0
+			USB3_DP_QSERDES_RXB_VTH_CODE 0x10 0
+			USB3_DP_PCS_LOCK_DETECT_CONFIG1 0xD0 0
+			USB3_DP_PCS_LOCK_DETECT_CONFIG2 0x07 0
+			USB3_DP_PCS_LOCK_DETECT_CONFIG3 0x20 0
+			USB3_DP_PCS_LOCK_DETECT_CONFIG6 0x13 0
+			USB3_DP_PCS_REFGEN_REQ_CONFIG1 0x21 0
+			USB3_DP_PCS_RX_SIGDET_LVL 0xAA 0
+			USB3_DP_PCS_CDR_RESET_TIME 0x0F 0
+			USB3_DP_PCS_ALIGN_DETECT_CONFIG1 0x88 0
+			USB3_DP_PCS_ALIGN_DETECT_CONFIG2 0x13 0
+			USB3_DP_PCS_PCS_TX_RX_CONFIG 0x0C 0
+			USB3_DP_PCS_EQ_CONFIG1 0x4B 0
+			USB3_DP_PCS_EQ_CONFIG5 0x10 0
+			USB3_DP_PCS_USB3_LFPS_DET_HIGH_COUNT_VAL 0xF8 0
+			USB3_DP_PCS_USB3_RXEQTRAINING_DFE_TIME_S2 0x07 0
+			0xffffffff 0xffffffff 0x00>;
+
+		qcom,qmp-phy-reg-offset =
+			<USB3_DP_PCS_PCS_STATUS1
+			 USB3_DP_PCS_USB3_AUTONOMOUS_MODE_CTRL
+			 USB3_DP_PCS_USB3_LFPS_RXTERM_IRQ_CLEAR
+			 USB3_DP_PCS_POWER_DOWN_CONTROL
+			 USB3_DP_PCS_SW_RESET
+			 USB3_DP_PCS_START_CONTROL
+			 0xffff /* USB3_PHY_PCS_MISC_TYPEC_CTRL */
+			 0x2a18 /* USB3_DP_DP_PHY_PD_CTL */
+			 USB3_DP_COM_POWER_DOWN_CTRL
+			 USB3_DP_COM_SW_RESET
+			 USB3_DP_COM_RESET_OVRD_CTRL
+			 USB3_DP_COM_PHY_MODE_CTRL
+			 USB3_DP_COM_TYPEC_CTRL
+			 USB3_DP_COM_SWI_CTRL
+			 USB3_DP_PCS_CLAMP_ENABLE
+			 USB3_DP_PCS_PCS_STATUS2
+			 USB3_DP_PCS_INSIG_SW_CTRL3
+			 USB3_DP_PCS_INSIG_MX_CTRL3>;
+
+		clocks = <&clock_gcc GCC_USB3_PRIM_PHY_AUX_CLK>,
+			<&clock_gcc GCC_USB3_PRIM_PHY_PIPE_CLK>,
+			<&clock_rpmh RPMH_CXO_CLK>,
+			<&clock_gcc GCC_USB3_PRIM_PHY_COM_AUX_CLK>;
+		clock-names = "aux_clk", "pipe_clk", "ref_clk_src",
+				"com_aux_clk";
+
+		resets = <&clock_gcc GCC_USB3_DP_PHY_PRIM_BCR>,
+			<&clock_gcc GCC_USB3_PHY_PRIM_BCR>;
+		reset-names = "global_phy_reset", "phy_reset";
+
+		status = "disabled";
+	};
+
 	usb_audio_qmi_dev {
 		compatible = "qcom,usb-audio-qmi-dev";
 		iommus = <&apps_smmu 0x180f 0x0>;
 		qcom,usb-audio-stream-id = <0xf>;
 		qcom,usb-audio-intr-num = <2>;
 	};
+
+	usb_nop_phy: usb_nop_phy {
+		compatible = "usb-nop-xceiv";
+	};
 };
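With the new usb-phy = <&usb2_phy0>, <&usb_nop_phy> pair, the controller can
resolve its high-speed PHY and the no-op super-speed placeholder by index. A
hedged sketch of that lookup (the wrapper function is invented; dwc3's real
probe path differs):

	#include <linux/err.h>
	#include <linux/usb/phy.h>

	static int get_usb_phys(struct device *dev)
	{
		struct usb_phy *hs_phy, *ss_phy;

		hs_phy = devm_usb_get_phy_by_phandle(dev, "usb-phy", 0);
		ss_phy = devm_usb_get_phy_by_phandle(dev, "usb-phy", 1);
		if (IS_ERR(hs_phy) || IS_ERR(ss_phy))
			return -EPROBE_DEFER;	/* PHYs may probe later */

		return usb_phy_init(hs_phy);
	}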
diff --git a/arch/arm64/boot/dts/qcom/kona-va-bolero.dtsi b/arch/arm64/boot/dts/qcom/kona-va-bolero.dtsi
index 97b772a5..3ddc27e 100644
--- a/arch/arm64/boot/dts/qcom/kona-va-bolero.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-va-bolero.dtsi
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  */
 
 &bolero {
@@ -14,6 +14,7 @@
 		qcom,va-vdd-micb-current = <11200>;
 		qcom,va-dmic-sample-rate = <4800000>;
 		qcom,va-clk-mux-select = <1>;
+		qcom,va-island-mode-muxsel = <0x033A0000>;
 	};
 };
 
diff --git a/arch/arm64/boot/dts/qcom/kona.dtsi b/arch/arm64/boot/dts/qcom/kona.dtsi
index 70e3e14..d8d7355 100644
--- a/arch/arm64/boot/dts/qcom/kona.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona.dtsi
@@ -189,7 +189,7 @@
 			next-level-cache = <&L2_4>;
 			qcom,freq-domain = <&cpufreq_hw 1 4>;
 			capacity-dmips-mhz = <1894>;
-			dynamic-power-coefficient = <374>;
+			dynamic-power-coefficient = <514>;
 			L2_4: l2-cache {
 			      compatible = "arm,arch-cache";
 			      cache-size = <0x40000>;
@@ -231,7 +231,7 @@
 			next-level-cache = <&L2_5>;
 			qcom,freq-domain = <&cpufreq_hw 1 4>;
 			capacity-dmips-mhz = <1894>;
-			dynamic-power-coefficient = <374>;
+			dynamic-power-coefficient = <514>;
 			L2_5: l2-cache {
 			      compatible = "arm,arch-cache";
 			      cache-size = <0x40000>;
@@ -273,7 +273,7 @@
 			next-level-cache = <&L2_6>;
 			qcom,freq-domain = <&cpufreq_hw 1 4>;
 			capacity-dmips-mhz = <1894>;
-			dynamic-power-coefficient = <374>;
+			dynamic-power-coefficient = <514>;
 			L2_6: l2-cache {
 			      compatible = "arm,arch-cache";
 			      cache-size = <0x40000>;
@@ -315,7 +315,7 @@
 			next-level-cache = <&L2_7>;
 			qcom,freq-domain = <&cpufreq_hw 2 4>;
 			capacity-dmips-mhz = <1894>;
-			dynamic-power-coefficient = <431>;
+			dynamic-power-coefficient = <598>;
 			L2_7: l2-cache {
 			      compatible = "arm,arch-cache";
 			      cache-size = <0x80000>;
@@ -947,6 +947,11 @@
 			compatible = "qcom,msm-imem-pil";
 			reg = <0x94c 0xc8>;
 		};
+
+		diag_dload@c8 {
+			compatible = "qcom,msm-imem-diag-dload";
+			reg = <0xc8 0xc8>;
+		};
 	};
 
 	restart@c264000 {
@@ -978,6 +983,7 @@
 		qcom,support-shutdown;
 		qcom,pil-force-shutdown;
 		qcom,esoc-skip-restart-for-mdm-crash;
+		qcom,esoc-spmi-soft-reset;
 		pinctrl-names = "default", "mdm_active", "mdm_suspend";
 		pinctrl-0 = <&ap2mdm_pon_reset_default>;
 		pinctrl-1 = <&ap2mdm_active &mdm2ap_active>;
diff --git a/arch/arm64/boot/dts/qcom/lito-gdsc.dtsi b/arch/arm64/boot/dts/qcom/lito-gdsc.dtsi
index 720e54b..8c2dc63 100644
--- a/arch/arm64/boot/dts/qcom/lito-gdsc.dtsi
+++ b/arch/arm64/boot/dts/qcom/lito-gdsc.dtsi
@@ -1,24 +1,24 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  */
 
 &soc {
 	/* GCC GDSCs */
 	ufs_phy_gdsc: qcom,gdsc@177004 {
-		compatible = "regulator-fixed";
+		compatible = "qcom,gdsc";
 		reg = <0x177004 0x4>;
 		regulator-name = "ufs_phy_gdsc";
 	};
 
 	usb30_prim_gdsc: qcom,gdsc@10f004 {
-		compatible = "regulator-fixed";
+		compatible = "qcom,gdsc";
 		reg = <0x10f004 0x4>;
 		regulator-name = "usb30_prim_gdsc";
 	};
 
 	hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc: qcom,gdsc@17d050 {
-		compatible = "regulator-fixed";
+		compatible = "qcom,gdsc";
 		reg = <0x17d050 0x4>;
 		regulator-name = "hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc";
 		qcom,no-status-check-on-disable;
@@ -26,7 +26,7 @@
 	};
 
 	hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc: qcom,gdsc@17d058 {
-		compatible = "regulator-fixed";
+		compatible = "qcom,gdsc";
 		reg = <0x17d058 0x4>;
 		regulator-name = "hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc";
 		qcom,no-status-check-on-disable;
@@ -34,7 +34,7 @@
 	};
 
 	hlos1_vote_mmnoc_mmu_tbu_sf0_gdsc: qcom,gdsc@17d054 {
-		compatible = "regulator-fixed";
+		compatible = "qcom,gdsc";
 		reg = <0x17d054 0x4>;
 		regulator-name = "hlos1_vote_mmnoc_mmu_tbu_sf0_gdsc";
 		qcom,no-status-check-on-disable;
diff --git a/arch/arm64/boot/dts/qcom/lito.dtsi b/arch/arm64/boot/dts/qcom/lito.dtsi
index 88e4ff2..cf9597e 100644
--- a/arch/arm64/boot/dts/qcom/lito.dtsi
+++ b/arch/arm64/boot/dts/qcom/lito.dtsi
@@ -806,15 +806,16 @@
 	clocks {
 		xo_board: xo-board {
 			compatible = "fixed-clock";
-			#clock-cells = <0>;
 			clock-frequency = <38400000>;
 			clock-output-names = "xo_board";
+			#clock-cells = <0>;
 		};
 
 		sleep_clk: sleep-clk {
 			compatible = "fixed-clock";
-			#clock-cells = <0>;
+			clock-output-names = "chip_sleep_clk";
 			clock-frequency = <32764>;
+			#clock-cells = <0>;
 		};
 	};
 
@@ -826,6 +827,14 @@
 		#clock-cells = <0>;
 	};
 
+	cxo_a: bi_tcxo_ao {
+		compatible = "fixed-factor-clock";
+		clocks = <&xo_board>;
+		clock-mult = <1>;
+		clock-div = <2>;
+		#clock-cells = <0>;
+	};
+
 	rpmhcc: qcom,rpmhclk {
 		compatible = "qcom,dummycc";
 		clock-output-names = "rpmh_clocks";
@@ -839,8 +848,11 @@
 	};
 
 	gcc: qcom,gcc {
-		compatible = "qcom,dummycc";
-		clock-output-names = "gcc_clocks";
+		compatible = "qcom,gcc-lito", "syscon";
+		reg = <0x100000 0x1f0000>;
+		reg-names = "cc_base";
+		vdd_cx-supply = <&VDD_CX_LEVEL>;
+		vdd_cx_ao-supply = <&VDD_CX_LEVEL_AO>;
 		#clock-cells = <1>;
 		#reset-cells = <1>;
 	};
diff --git a/arch/arm64/boot/dts/qcom/msm-arm-smmu-kona.dtsi b/arch/arm64/boot/dts/qcom/msm-arm-smmu-kona.dtsi
index 405edd8..e68bfd5 100644
--- a/arch/arm64/boot/dts/qcom/msm-arm-smmu-kona.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm-arm-smmu-kona.dtsi
@@ -21,6 +21,14 @@
 		ranges;
 		qcom,regulator-names = "vdd";
 		vdd-supply = <&gpu_cx_gdsc>;
+
+		clocks = <&clock_gcc GCC_GPU_MEMNOC_GFX_CLK>,
+			 <&clock_gcc GCC_GPU_SNOC_DVM_GFX_CLK>,
+			 <&clock_gpucc GPU_CC_AHB_CLK>;
+		clock-names = "gcc_gpu_memnoc_gfx",
+			      "gcc_gpu_snoc_dvm_gfx",
+			      "gpu_cc_ahb";
+
 		interrupts =	<GIC_SPI 672 IRQ_TYPE_LEVEL_HIGH>,
 				<GIC_SPI 673 IRQ_TYPE_LEVEL_HIGH>,
 				<GIC_SPI 678 IRQ_TYPE_LEVEL_HIGH>,
diff --git a/arch/arm64/boot/dts/qcom/sdm845-mtp.dts b/arch/arm64/boot/dts/qcom/sdm845-mtp.dts
index 6d651f3..6921f8d 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-mtp.dts
@@ -31,6 +31,10 @@
 	status = "okay";
 };
 
+&tlmm {
+	gpio-reserved-ranges = <0 4>, <81 4>;
+};
+
 &uart9 {
 	status = "okay";
 };
diff --git a/arch/arm64/configs/vendor/kona-perf_defconfig b/arch/arm64/configs/vendor/kona-perf_defconfig
index 99ffda0..524c53e 100644
--- a/arch/arm64/configs/vendor/kona-perf_defconfig
+++ b/arch/arm64/configs/vendor/kona-perf_defconfig
@@ -551,5 +551,4 @@
 CONFIG_CORESIGHT_HWEVENT=y
 CONFIG_CORESIGHT_DUMMY=y
 CONFIG_CORESIGHT_REMOTE_ETM=y
-CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0
 CONFIG_CORESIGHT_TGU=y
diff --git a/arch/arm64/configs/vendor/kona_defconfig b/arch/arm64/configs/vendor/kona_defconfig
index d564c71..2e630b1 100644
--- a/arch/arm64/configs/vendor/kona_defconfig
+++ b/arch/arm64/configs/vendor/kona_defconfig
@@ -611,5 +611,4 @@
 CONFIG_CORESIGHT_HWEVENT=y
 CONFIG_CORESIGHT_DUMMY=y
 CONFIG_CORESIGHT_REMOTE_ETM=y
-CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0
 CONFIG_CORESIGHT_TGU=y
diff --git a/arch/arm64/configs/vendor/lito_defconfig b/arch/arm64/configs/vendor/lito_defconfig
index 8ed0a65..0bab4f1 100644
--- a/arch/arm64/configs/vendor/lito_defconfig
+++ b/arch/arm64/configs/vendor/lito_defconfig
@@ -349,6 +349,9 @@
 CONFIG_STAGING=y
 CONFIG_ASHMEM=y
 CONFIG_ION=y
+# CONFIG_QCOM_A53PLL is not set
+CONFIG_QCOM_CLK_RPMH=y
+CONFIG_SM_GCC_LITO=y
 CONFIG_HWSPINLOCK=y
 CONFIG_HWSPINLOCK_QCOM=y
 CONFIG_MAILBOX=y
diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
index 74091fd..d5523ad 100644
--- a/arch/s390/kernel/perf_cpum_cf.c
+++ b/arch/s390/kernel/perf_cpum_cf.c
@@ -346,6 +346,8 @@ static int __hw_perf_event_init(struct perf_event *event)
 		break;
 
 	case PERF_TYPE_HARDWARE:
+		if (is_sampling_event(event))	/* No sampling support */
+			return -ENOENT;
 		ev = attr->config;
 		/* Count user space (problem-state) only */
 		if (!attr->exclude_user && attr->exclude_kernel) {
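The new guard keys off is_sampling_event(), which is true whenever the caller
requested a sample period or frequency; the CPU-measurement counter facility
can only count, not sample. A minimal restatement of the check:

	#include <linux/perf_event.h>

	/* Illustrative: counting-only PMUs bow out of sampling requests. */
	static int reject_sampling(struct perf_event *event)
	{
		/* is_sampling_event() == (event->attr.sample_period != 0) */
		if (is_sampling_event(event))
			return -ENOENT;	/* let another PMU claim the event */
		return 0;
	}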
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 2216d21..3692de8 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -55,7 +55,7 @@
 #define PRIo64 "o"
 
 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
-#define apic_debug(fmt, arg...)
+#define apic_debug(fmt, arg...) do {} while (0)
 
 /* 14 is the version for Xeon and Pentium 8.4.8*/
 #define APIC_VERSION			(0x14UL | ((KVM_APIC_LVT_NUM - 1) << 16))
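The do {} while (0) stub matters because an empty expansion leaves a bare ';'
as the entire body of any guarding 'if', which gcc reports as -Wempty-body
under W=1. A self-contained illustration of the difference:

	/*
	 * With "#define apic_debug(fmt, arg...)" the call below becomes
	 * "if (enabled) ;" and triggers -Wempty-body; the statement form
	 * keeps the if-body non-empty while still compiling to nothing.
	 */
	#define apic_debug(fmt, arg...) do {} while (0)

	static void demo(int enabled)
	{
		if (enabled)
			apic_debug("lapic enabled\n");
	}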
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index e55f7a9..c97a9d6 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -962,6 +962,7 @@ struct vcpu_vmx {
 	struct shared_msr_entry *guest_msrs;
 	int                   nmsrs;
 	int                   save_nmsrs;
+	bool                  guest_msrs_dirty;
 	unsigned long	      host_idt_base;
 #ifdef CONFIG_X86_64
 	u64 		      msr_host_kernel_gs_base;
@@ -1284,7 +1285,7 @@ static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked);
 static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12,
 					    u16 error_code);
 static void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu);
-static void __always_inline vmx_disable_intercept_for_msr(unsigned long *msr_bitmap,
+static __always_inline void vmx_disable_intercept_for_msr(unsigned long *msr_bitmap,
 							  u32 msr, int type);
 
 static DEFINE_PER_CPU(struct vmcs *, vmxarea);
@@ -2874,6 +2875,20 @@ static void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
 
 	vmx->req_immediate_exit = false;
 
+	/*
+	 * Note that guest MSRs to be saved/restored can also be changed
+	 * when guest state is loaded. This happens when guest transitions
+	 * to/from long-mode by setting MSR_EFER.LMA.
+	 */
+	if (!vmx->loaded_cpu_state || vmx->guest_msrs_dirty) {
+		vmx->guest_msrs_dirty = false;
+		for (i = 0; i < vmx->save_nmsrs; ++i)
+			kvm_set_shared_msr(vmx->guest_msrs[i].index,
+					   vmx->guest_msrs[i].data,
+					   vmx->guest_msrs[i].mask);
+
+	}
+
 	if (vmx->loaded_cpu_state)
 		return;
 
@@ -2934,11 +2949,6 @@ static void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
 		vmcs_writel(HOST_GS_BASE, gs_base);
 		host_state->gs_base = gs_base;
 	}
-
-	for (i = 0; i < vmx->save_nmsrs; ++i)
-		kvm_set_shared_msr(vmx->guest_msrs[i].index,
-				   vmx->guest_msrs[i].data,
-				   vmx->guest_msrs[i].mask);
 }
 
 static void vmx_prepare_switch_to_host(struct vcpu_vmx *vmx)
@@ -3418,6 +3428,7 @@ static void setup_msrs(struct vcpu_vmx *vmx)
 		move_msr_up(vmx, index, save_nmsrs++);
 
 	vmx->save_nmsrs = save_nmsrs;
+	vmx->guest_msrs_dirty = true;
 
 	if (cpu_has_vmx_msr_bitmap())
 		vmx_update_msr_bitmap(&vmx->vcpu);
@@ -5924,7 +5935,7 @@ static void free_vpid(int vpid)
 	spin_unlock(&vmx_vpid_lock);
 }
 
-static void __always_inline vmx_disable_intercept_for_msr(unsigned long *msr_bitmap,
+static __always_inline void vmx_disable_intercept_for_msr(unsigned long *msr_bitmap,
 							  u32 msr, int type)
 {
 	int f = sizeof(unsigned long);
@@ -5962,7 +5973,7 @@ static void __always_inline vmx_disable_intercept_for_msr(unsigned long *msr_bit
 	}
 }
 
-static void __always_inline vmx_enable_intercept_for_msr(unsigned long *msr_bitmap,
+static __always_inline void vmx_enable_intercept_for_msr(unsigned long *msr_bitmap,
 							 u32 msr, int type)
 {
 	int f = sizeof(unsigned long);
@@ -6000,7 +6011,7 @@ static void __always_inline vmx_enable_intercept_for_msr(unsigned long *msr_bitm
 	}
 }
 
-static void __always_inline vmx_set_intercept_for_msr(unsigned long *msr_bitmap,
+static __always_inline void vmx_set_intercept_for_msr(unsigned long *msr_bitmap,
 			     			      u32 msr, int type, bool value)
 {
 	if (value)
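The guest_msrs_dirty change above is a dirty-flag deferral: setup_msrs() only
marks the shared-MSR cache stale, and the per-MSR writeback now happens once,
on the next switch into the guest. A simplified reduction of the pattern
(types and names are stand-ins, not KVM's):

	struct msr_cache {
		int  nmsrs;
		bool dirty;	/* set on reconfiguration, cleared on switch */
	};

	static void mark_msrs_dirty(struct msr_cache *c)
	{
		c->dirty = true;	/* cheap; safe wherever MSRs change */
	}

	static void switch_to_guest(struct msr_cache *c, bool state_loaded)
	{
		if (!state_loaded || c->dirty) {
			c->dirty = false;
			/* ... write each cached MSR to hardware here ... */
		}
	}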
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 2eeddd8..c6c7c9b 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -7,7 +7,6 @@
 
 #include <xen/features.h>
 #include <xen/page.h>
-#include <xen/interface/memory.h>
 
 #include <asm/xen/hypercall.h>
 #include <asm/xen/hypervisor.h>
@@ -343,80 +342,3 @@ void xen_arch_unregister_cpu(int num)
 }
 EXPORT_SYMBOL(xen_arch_unregister_cpu);
 #endif
-
-#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
-void __init arch_xen_balloon_init(struct resource *hostmem_resource)
-{
-	struct xen_memory_map memmap;
-	int rc;
-	unsigned int i, last_guest_ram;
-	phys_addr_t max_addr = PFN_PHYS(max_pfn);
-	struct e820_table *xen_e820_table;
-	const struct e820_entry *entry;
-	struct resource *res;
-
-	if (!xen_initial_domain())
-		return;
-
-	xen_e820_table = kmalloc(sizeof(*xen_e820_table), GFP_KERNEL);
-	if (!xen_e820_table)
-		return;
-
-	memmap.nr_entries = ARRAY_SIZE(xen_e820_table->entries);
-	set_xen_guest_handle(memmap.buffer, xen_e820_table->entries);
-	rc = HYPERVISOR_memory_op(XENMEM_machine_memory_map, &memmap);
-	if (rc) {
-		pr_warn("%s: Can't read host e820 (%d)\n", __func__, rc);
-		goto out;
-	}
-
-	last_guest_ram = 0;
-	for (i = 0; i < memmap.nr_entries; i++) {
-		if (xen_e820_table->entries[i].addr >= max_addr)
-			break;
-		if (xen_e820_table->entries[i].type == E820_TYPE_RAM)
-			last_guest_ram = i;
-	}
-
-	entry = &xen_e820_table->entries[last_guest_ram];
-	if (max_addr >= entry->addr + entry->size)
-		goto out; /* No unallocated host RAM. */
-
-	hostmem_resource->start = max_addr;
-	hostmem_resource->end = entry->addr + entry->size;
-
-	/*
-	 * Mark non-RAM regions between the end of dom0 RAM and end of host RAM
-	 * as unavailable. The rest of that region can be used for hotplug-based
-	 * ballooning.
-	 */
-	for (; i < memmap.nr_entries; i++) {
-		entry = &xen_e820_table->entries[i];
-
-		if (entry->type == E820_TYPE_RAM)
-			continue;
-
-		if (entry->addr >= hostmem_resource->end)
-			break;
-
-		res = kzalloc(sizeof(*res), GFP_KERNEL);
-		if (!res)
-			goto out;
-
-		res->name = "Unavailable host RAM";
-		res->start = entry->addr;
-		res->end = (entry->addr + entry->size < hostmem_resource->end) ?
-			    entry->addr + entry->size : hostmem_resource->end;
-		rc = insert_resource(hostmem_resource, res);
-		if (rc) {
-			pr_warn("%s: Can't insert [%llx - %llx) (%d)\n",
-				__func__, res->start, res->end, rc);
-			kfree(res);
-			goto  out;
-		}
-	}
-
- out:
-	kfree(xen_e820_table);
-}
-#endif /* CONFIG_XEN_BALLOON_MEMORY_HOTPLUG */
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 1163e33..075ed47 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -808,6 +808,7 @@ char * __init xen_memory_setup(void)
 	addr = xen_e820_table.entries[0].addr;
 	size = xen_e820_table.entries[0].size;
 	while (i < xen_e820_table.nr_entries) {
+		bool discard = false;
 
 		chunk_size = size;
 		type = xen_e820_table.entries[i].type;
@@ -823,10 +824,11 @@ char * __init xen_memory_setup(void)
 				xen_add_extra_mem(pfn_s, n_pfns);
 				xen_max_p2m_pfn = pfn_s + n_pfns;
 			} else
-				type = E820_TYPE_UNUSABLE;
+				discard = true;
 		}
 
-		xen_align_and_add_e820_region(addr, chunk_size, type);
+		if (!discard)
+			xen_align_and_add_e820_region(addr, chunk_size, type);
 
 		addr += chunk_size;
 		size -= chunk_size;
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
index 08f26db..e938576e 100644
--- a/drivers/acpi/arm64/iort.c
+++ b/drivers/acpi/arm64/iort.c
@@ -700,7 +700,7 @@ static void iort_set_device_domain(struct device *dev,
  */
 static struct irq_domain *iort_get_platform_device_domain(struct device *dev)
 {
-	struct acpi_iort_node *node, *msi_parent;
+	struct acpi_iort_node *node, *msi_parent = NULL;
 	struct fwnode_handle *iort_fwnode;
 	struct acpi_iort_its_group *its;
 	int i;
diff --git a/drivers/bluetooth/bluetooth-power.c b/drivers/bluetooth/bluetooth-power.c
index 58bea41b..eb9f4db 100644
--- a/drivers/bluetooth/bluetooth-power.c
+++ b/drivers/bluetooth/bluetooth-power.c
@@ -289,6 +289,27 @@ static int bluetooth_power(int on)
 				goto vdd_ldo_fail;
 			}
 		}
+		if (bt_power_pdata->bt_vdd_dig) {
+			rc = bt_configure_vreg(bt_power_pdata->bt_vdd_dig);
+			if (rc < 0) {
+				BT_PWR_ERR("bt_power vdddig config failed");
+				goto vdd_dig_fail;
+			}
+		}
+		if (bt_power_pdata->bt_vdd_rfa1) {
+			rc = bt_configure_vreg(bt_power_pdata->bt_vdd_rfa1);
+			if (rc < 0) {
+				BT_PWR_ERR("bt_power vddrfa1 config failed");
+				goto vdd_rfa1_fail;
+			}
+		}
+		if (bt_power_pdata->bt_vdd_rfa2) {
+			rc = bt_configure_vreg(bt_power_pdata->bt_vdd_rfa2);
+			if (rc < 0) {
+				BT_PWR_ERR("bt_power vddrfa2 config failed");
+				goto vdd_rfa2_fail;
+			}
+		}
 		if (bt_power_pdata->bt_chip_pwd) {
 			rc = bt_configure_vreg(bt_power_pdata->bt_chip_pwd);
 			if (rc < 0) {
@@ -325,6 +346,15 @@ static int bluetooth_power(int on)
 		if (bt_power_pdata->bt_chip_pwd)
 			bt_vreg_disable(bt_power_pdata->bt_chip_pwd);
 chip_pwd_fail:
+		if (bt_power_pdata->bt_vdd_rfa2)
+			bt_vreg_disable(bt_power_pdata->bt_vdd_rfa2);
+vdd_rfa2_fail:
+		if (bt_power_pdata->bt_vdd_rfa1)
+			bt_vreg_disable(bt_power_pdata->bt_vdd_rfa1);
+vdd_rfa1_fail:
+		if (bt_power_pdata->bt_vdd_dig)
+			bt_vreg_disable(bt_power_pdata->bt_vdd_dig);
+vdd_dig_fail:
 		if (bt_power_pdata->bt_vdd_ldo)
 			bt_vreg_disable(bt_power_pdata->bt_vdd_ldo);
 vdd_ldo_fail:
@@ -595,6 +625,21 @@ static int bt_power_populate_dt_pinfo(struct platform_device *pdev)
 		if (rc < 0)
 			BT_PWR_ERR("bt-chip-pwd not provided in device tree");
 
+		rc = bt_dt_parse_vreg_info(&pdev->dev,
+					&bt_power_pdata->bt_vdd_dig,
+					"qca,bt-vdd-dig");
+		if (rc < 0)
+			BT_PWR_ERR("bt-vdd-dig not provided in device tree");
+		rc = bt_dt_parse_vreg_info(&pdev->dev,
+					&bt_power_pdata->bt_vdd_rfa1,
+					"qca,bt-vdd-rfa1");
+		if (rc < 0)
+			BT_PWR_ERR("bt-vdd-rfa1 not provided in device tree");
+		rc = bt_dt_parse_vreg_info(&pdev->dev,
+					&bt_power_pdata->bt_vdd_rfa2,
+					"qca,bt-vdd-rfa2");
+		if (rc < 0)
+			BT_PWR_ERR("bt-vdd-rfa2 not provided in device tree");
 		rc = bt_dt_parse_clk_info(&pdev->dev,
 					&bt_power_pdata->bt_chip_clk);
 		if (rc < 0)
diff --git a/drivers/char/diag/diag_dci.c b/drivers/char/diag/diag_dci.c
index 5889325..a723bba 100644
--- a/drivers/char/diag/diag_dci.c
+++ b/drivers/char/diag/diag_dci.c
@@ -1630,7 +1630,7 @@ static int diag_send_dci_pkt(struct diag_cmd_reg_t *entry,
 #ifdef CONFIG_DIAGFWD_BRIDGE_CODE
 unsigned char *dci_get_buffer_from_bridge(int token)
 {
-	uint8_t retries = 0, max_retries = 3;
+	uint8_t retries = 0, max_retries = 50;
 	unsigned char *buf = NULL;
 
 	do {
@@ -2303,8 +2303,8 @@ struct diag_dci_client_tbl *dci_lookup_client_entry_pid(int tgid)
 		pid_struct = find_get_pid(entry->tgid);
 		if (!pid_struct) {
 			DIAG_LOG(DIAG_DEBUG_DCI,
-				"diag: valid pid doesn't exist for pid = %d\n",
-				entry->tgid);
+			"diag: Exiting pid (%d) doesn't match dci client's pid (%d)\n",
+			tgid, entry->tgid);
 			continue;
 		}
 		task_s = get_pid_task(pid_struct, PIDTYPE_PID);
diff --git a/drivers/char/diag/diag_usb.c b/drivers/char/diag/diag_usb.c
index e918738..5a7910b 100644
--- a/drivers/char/diag/diag_usb.c
+++ b/drivers/char/diag/diag_usb.c
@@ -220,13 +220,6 @@ static void usb_connect_work_fn(struct work_struct *work)
  */
 static void usb_disconnect(struct diag_usb_info *ch)
 {
-	if (!ch)
-		return;
-
-	if (!atomic_read(&ch->connected) &&
-		driver->usb_connected && diag_mask_param())
-		diag_clear_masks(0);
-
 	if (ch && ch->ops && ch->ops->close)
 		ch->ops->close(ch->ctxt, DIAG_USB_MODE);
 }
@@ -236,6 +229,9 @@ static void usb_disconnect_work_fn(struct work_struct *work)
 	struct diag_usb_info *ch = container_of(work, struct diag_usb_info,
 						disconnect_work);
 
+	if (!ch)
+		return;
+
 	atomic_set(&ch->disconnected, 1);
 	DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
 	"diag: USB channel %s: disconnected_status: %d, connected_status: %d\n",
@@ -248,6 +244,10 @@ static void usb_disconnect_work_fn(struct work_struct *work)
 	"diag: USB channel %s: Cleared disconnected(%d) and connected(%d) status\n",
 	ch->name, atomic_read(&ch->disconnected), atomic_read(&ch->connected));
 
+	if (!atomic_read(&ch->connected) &&
+		driver->usb_connected && diag_mask_param())
+		diag_clear_masks(0);
+
 	usb_disconnect(ch);
 }
 
diff --git a/drivers/char/diag/diagchar.h b/drivers/char/diag/diagchar.h
index 8863397..2cdd341 100644
--- a/drivers/char/diag/diagchar.h
+++ b/drivers/char/diag/diagchar.h
@@ -465,6 +465,12 @@ struct diag_logging_mode_param_t {
 	int peripheral;
 } __packed;
 
+struct diag_query_pid_t {
+	uint32_t peripheral_mask;
+	uint32_t pd_mask;
+	int pid;
+};
+
 struct diag_con_all_param_t {
 	uint32_t diag_con_all;
 	uint32_t num_peripherals;
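Userspace drives the new query by filling exactly one of the two masks and
reading the session pid back from .pid. A hedged usage sketch (the
DIAG_IOCTL_QUERY_MD_PID request value lives in the diag UAPI header and is
deliberately not reproduced here):

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>

	struct diag_query_pid_t {
		unsigned int peripheral_mask;
		unsigned int pd_mask;
		int pid;
	};

	int main(void)
	{
		struct diag_query_pid_t q = { .peripheral_mask = 0x1 };
		int fd = open("/dev/diag", O_RDWR);

		/* 2nd ioctl arg: DIAG_IOCTL_QUERY_MD_PID from the UAPI header */
		if (fd < 0 || ioctl(fd, /* DIAG_IOCTL_QUERY_MD_PID */ 0, &q))
			return 1;
		printf("ODL session pid: %d\n", q.pid);
		return 0;
	}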
diff --git a/drivers/char/diag/diagchar_core.c b/drivers/char/diag/diagchar_core.c
index 53228ec..6e5b656 100644
--- a/drivers/char/diag/diagchar_core.c
+++ b/drivers/char/diag/diagchar_core.c
@@ -534,8 +534,8 @@ static int diagchar_close(struct inode *inode, struct file *file)
 {
 	int ret;
 
-	DIAG_LOG(DIAG_DEBUG_USERSPACE, "diag: process exit %s\n",
-		current->comm);
+	DIAG_LOG(DIAG_DEBUG_USERSPACE, "diag: %s process exit with pid = %d\n",
+		current->comm, current->tgid);
 	ret = diag_remove_client_entry(file);
 
 	return ret;
@@ -972,7 +972,7 @@ static int diag_send_raw_data_remote(int proc, void *buf, int len,
 	int err = 0;
 	int max_len = 0;
 	uint8_t retry_count = 0;
-	uint8_t max_retries = 3;
+	uint8_t max_retries = 50;
 	uint16_t payload = 0;
 	struct diag_send_desc_type send = { NULL, NULL, DIAG_STATE_START, 0 };
 	struct diag_hdlc_dest_type enc = { NULL, NULL, 0 };
@@ -2342,6 +2342,93 @@ static int diag_ioctl_query_pd_logging(struct diag_logging_mode_param_t *param)
 	return ret;
 }
 
+static void diag_ioctl_query_session_pid(struct diag_query_pid_t *param)
+{
+	int prev_pid = 0, test_pid = 0, i = 0, count = 0;
+	uint32_t pd_mask = 0, peripheral_mask = 0;
+	struct diag_md_session_t *info = NULL;
+
+	param->pid = 0;
+
+	if (param->pd_mask && param->peripheral_mask) {
+		param->pid = -EINVAL;
+		return;
+	} else if (param->peripheral_mask) {
+		if (param->peripheral_mask == DIAG_CON_ALL) {
+			for (i = 0; i <= NUM_PERIPHERALS; i++) {
+				if (driver->md_session_map[i]) {
+					test_pid =
+					driver->md_session_map[i]->pid;
+					count++;
+					if (!prev_pid)
+						prev_pid = test_pid;
+					if (test_pid != prev_pid) {
+						DIAG_LOG(DIAG_DEBUG_USERSPACE,
+						"diag: One of the peripherals is being logged already\n");
+						param->pid = -EINVAL;
+					}
+				}
+			}
+			if (i == count && prev_pid)
+				param->pid = prev_pid;
+		} else {
+			peripheral_mask =
+				diag_translate_mask(param->peripheral_mask);
+			for (i = 0; i <= NUM_PERIPHERALS; i++) {
+				if (driver->md_session_map[i] &&
+					(peripheral_mask &
+					MD_PERIPHERAL_MASK(i))) {
+					info = driver->md_session_map[i];
+					if (peripheral_mask !=
+						info->peripheral_mask) {
+						DIAG_LOG(DIAG_DEBUG_USERSPACE,
+						"diag: Invalid Peripheral mask given as input\n");
+						param->pid = -EINVAL;
+						return;
+					}
+					test_pid = info->pid;
+					if (!prev_pid)
+						prev_pid = test_pid;
+					if (test_pid != prev_pid) {
+						DIAG_LOG(DIAG_DEBUG_USERSPACE,
+						"diag: One of the peripherals is logged in different session\n");
+						param->pid = -EINVAL;
+						return;
+					}
+				}
+			}
+			param->pid = prev_pid;
+		}
+	} else if (param->pd_mask) {
+		pd_mask =
+			diag_translate_mask(param->pd_mask);
+		for (i = UPD_WLAN; i < NUM_MD_SESSIONS; i++) {
+			if (driver->md_session_map[i] &&
+				(pd_mask & MD_PERIPHERAL_MASK(i))) {
+				info = driver->md_session_map[i];
+				if (pd_mask != info->peripheral_mask) {
+					DIAG_LOG(DIAG_DEBUG_USERSPACE,
+					"diag: Invalid PD mask given as input\n");
+					param->pid = -EINVAL;
+					return;
+				}
+				test_pid = info->pid;
+				if (!prev_pid)
+					prev_pid = test_pid;
+				if (test_pid != prev_pid) {
+					DIAG_LOG(DIAG_DEBUG_USERSPACE,
+					"diag: One of the PDs is being logged already\n");
+					param->pid = -EINVAL;
+					return;
+				}
+			}
+		}
+		param->pid = prev_pid;
+	}
+	DIAG_LOG(DIAG_DEBUG_USERSPACE,
+	"diag: Pid for the active ODL session: %d\n", param->pid);
+}
+
 static int diag_ioctl_register_callback(unsigned long ioarg)
 {
 	int err = 0;
@@ -2469,6 +2556,7 @@ long diagchar_compat_ioctl(struct file *filp,
 	struct diag_dci_client_tbl *dci_client = NULL;
 	struct diag_logging_mode_param_t mode_param;
 	struct diag_con_all_param_t con_param;
+	struct diag_query_pid_t pid_query;
 
 	switch (iocmd) {
 	case DIAG_IOCTL_COMMAND_REG:
@@ -2596,6 +2684,22 @@ long diagchar_compat_ioctl(struct file *filp,
 		else
 			result = 0;
 		break;
+	case DIAG_IOCTL_QUERY_MD_PID:
+		if (copy_from_user((void *)&pid_query, (void __user *)ioarg,
+				   sizeof(pid_query))) {
+			result = -EFAULT;
+			break;
+		}
+		mutex_lock(&driver->md_session_lock);
+		diag_ioctl_query_session_pid(&pid_query);
+		mutex_unlock(&driver->md_session_lock);
+
+		if (copy_to_user((void __user *)ioarg, &pid_query,
+				sizeof(pid_query)))
+			result = -EFAULT;
+		else
+			result = 0;
+		break;
 	}
 	return result;
 }
@@ -2611,6 +2715,7 @@ long diagchar_ioctl(struct file *filp,
 	struct diag_dci_client_tbl *dci_client = NULL;
 	struct diag_logging_mode_param_t mode_param;
 	struct diag_con_all_param_t con_param;
+	struct diag_query_pid_t pid_query;
 
 	switch (iocmd) {
 	case DIAG_IOCTL_COMMAND_REG:
@@ -2738,6 +2843,23 @@ long diagchar_ioctl(struct file *filp,
 		else
 			result = 0;
 		break;
+	case DIAG_IOCTL_QUERY_MD_PID:
+		if (copy_from_user((void *)&pid_query, (void __user *)ioarg,
+				   sizeof(pid_query))) {
+			result = -EFAULT;
+			break;
+		}
+
+		mutex_lock(&driver->md_session_lock);
+		diag_ioctl_query_session_pid(&pid_query);
+		mutex_unlock(&driver->md_session_lock);
+
+		if (copy_to_user((void __user *)ioarg, &pid_query,
+				sizeof(pid_query)))
+			result = -EFAULT;
+		else
+			result = 0;
+		break;
 	}
 	return result;
 }
@@ -3083,7 +3205,7 @@ static int diag_user_process_raw_data(const char __user *buf, int len)
 static int diag_user_process_userspace_data(const char __user *buf, int len)
 {
 	int err = 0;
-	int max_retries = 3;
+	int max_retries = 50;
 	int retry_count = 0;
 	int remote_proc = 0;
 	int token_offset = 0;
@@ -3268,6 +3390,8 @@ static ssize_t diagchar_read(struct file *file, char __user *buf, size_t count,
 	int exit_stat = 0;
 	int write_len = 0;
 	struct diag_md_session_t *session_info = NULL;
+	struct pid *pid_struct = NULL;
+	struct task_struct *task_s = NULL;
 
 	mutex_lock(&driver->diagchar_mutex);
 	for (i = 0; i < driver->num_clients; i++)
@@ -3514,8 +3638,19 @@ static ssize_t diagchar_read(struct file *file, char __user *buf, size_t count,
 		list_for_each_safe(start, temp, &driver->dci_client_list) {
 			entry = list_entry(start, struct diag_dci_client_tbl,
 									track);
-			if (entry->client->tgid != current->tgid)
+			pid_struct = find_get_pid(entry->tgid);
+			if (!pid_struct)
 				continue;
+			task_s = get_pid_task(pid_struct, PIDTYPE_PID);
+			if (!task_s) {
+				DIAG_LOG(DIAG_DEBUG_DCI,
+				"diag: valid task doesn't exist for pid = %d\n",
+				entry->tgid);
+				continue;
+			}
+			if (task_s == entry->client &&
+				entry->client->tgid != current->tgid)
+				continue;
 			if (!entry->in_service)
 				continue;
 			if (copy_to_user(buf + ret, &data_type, sizeof(int))) {
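
For reference, find_get_pid() and get_pid_task() used in the hunk above
each take a reference that the caller then owns. A minimal balanced
sketch of the lookup pattern (whether the matching puts are handled
elsewhere is not visible in this hunk):

	/* Sketch: check whether the recorded tgid still maps to the
	 * recorded task, dropping both references before returning.
	 */
	static bool dci_client_task_alive(struct diag_dci_client_tbl *entry)
	{
		struct pid *pid_struct = find_get_pid(entry->tgid);
		struct task_struct *task;
		bool alive;

		if (!pid_struct)
			return false;
		task = get_pid_task(pid_struct, PIDTYPE_PID);
		put_pid(pid_struct);		/* drop the pid reference */
		if (!task)
			return false;
		alive = (task == entry->client);
		put_task_struct(task);		/* drop the task reference */
		return alive;
	}
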
diff --git a/drivers/char/diag/diagfwd_cntl.c b/drivers/char/diag/diagfwd_cntl.c
index d19c494..9fef6ac 100644
--- a/drivers/char/diag/diagfwd_cntl.c
+++ b/drivers/char/diag/diagfwd_cntl.c
@@ -21,7 +21,7 @@
 #define FEATURE_SUPPORTED(x)	((feature_mask << (i * 8)) & (1 << x))
 
 /* tracks which peripheral is undergoing SSR */
-static uint16_t reg_dirty;
+static uint16_t reg_dirty[NUM_PERIPHERALS];
 static uint8_t diag_id = DIAG_ID_APPS;
 static void diag_notify_md_client(uint8_t peripheral, int data);
 
@@ -58,14 +58,14 @@ void diag_cntl_channel_close(struct diagfwd_info *p_info)
 
 	driver->feature[peripheral].sent_feature_mask = 0;
 	driver->feature[peripheral].rcvd_feature_mask = 0;
-	reg_dirty |= PERIPHERAL_MASK(peripheral);
+	reg_dirty[peripheral] = 1;
 	diag_cmd_remove_reg_by_proc(peripheral);
 	driver->diag_id_sent[peripheral] = 0;
 	driver->feature[peripheral].stm_support = DISABLE_STM;
 	driver->feature[peripheral].log_on_demand = 0;
 	driver->stm_state[peripheral] = DISABLE_STM;
 	driver->stm_state_requested[peripheral] = DISABLE_STM;
-	reg_dirty ^= PERIPHERAL_MASK(peripheral);
+	reg_dirty[peripheral] = 0;
 	diag_notify_md_client(peripheral, DIAG_STATUS_CLOSED);
 }
 
@@ -869,7 +869,7 @@ void diag_cntl_process_read_data(struct diagfwd_info *p_info, void *buf,
 	if (!buf || len <= 0 || !p_info)
 		return;
 
-	if (reg_dirty & PERIPHERAL_MASK(p_info->peripheral)) {
+	if (reg_dirty[p_info->peripheral]) {
 		pr_err_ratelimited("diag: dropping command registration from peripheral %d\n",
 		       p_info->peripheral);
 		return;
@@ -1644,13 +1644,14 @@ int diagfwd_cntl_init(void)
 {
 	uint8_t peripheral = 0;
 
-	reg_dirty = 0;
 	driver->polling_reg_flag = 0;
 	driver->log_on_demand_support = 1;
 	driver->stm_peripheral = 0;
 	driver->close_transport = 0;
-	for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++)
+	for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
 		driver->buffering_flag[peripheral] = 0;
+		reg_dirty[peripheral] = 0;
+	}
 
 	mutex_init(&driver->cntl_lock);
 	INIT_WORK(&(driver->stm_update_work), diag_stm_update_work_fn);
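
One plausible motivation for turning reg_dirty into a per-peripheral
array: the old code set the bit with |= and cleared it with ^=, and an
XOR toggle is only correct if set and clear alternate exactly once per
SSR cycle, whereas assigning a per-peripheral flag is idempotent and
needs no mask arithmetic:

	reg_dirty |= PERIPHERAL_MASK(p);	/* old: set */
	reg_dirty ^= PERIPHERAL_MASK(p);	/* old: clear by toggle;
						 * re-sets the bit if it
						 * was already clear */

	reg_dirty[p] = 1;			/* new: set, idempotent */
	reg_dirty[p] = 0;			/* new: clear, idempotent */
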
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 84e655d..fce7ab4 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -1,7 +1,7 @@
 /*
  * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com>
  * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -67,7 +67,9 @@ struct clk_core {
 	unsigned long		rate;
 	unsigned long		req_rate;
 	unsigned long		new_rate;
+	unsigned long		old_rate;
 	struct clk_core		*new_parent;
+	struct clk_core		*old_parent;
 	struct clk_core		*new_child;
 	unsigned long		flags;
 	bool			orphan;
@@ -2081,7 +2083,7 @@ static int clk_change_rate(struct clk_core *core)
 	struct clk_core *parent = NULL;
 	int rc = 0;
 
-	old_rate = core->rate;
+	core->old_rate = old_rate = core->rate;
 
 	if (core->new_parent) {
 		parent = core->new_parent;
@@ -2095,6 +2097,8 @@ static int clk_change_rate(struct clk_core *core)
 	if (rc)
 		return rc;
 
+	core->old_parent = core->parent;
+
 	if (core->flags & CLK_SET_RATE_UNGATE) {
 		unsigned long flags;
 
@@ -2106,15 +2110,9 @@ static int clk_change_rate(struct clk_core *core)
 
 	trace_clk_set_rate(core, core->new_rate);
 
-	/* Enforce vdd requirements for new frequency. */
-	if (core->prepare_count) {
-		rc = clk_vote_rate_vdd(core, core->new_rate);
-		if (rc)
-			goto out;
-	}
-
 	if (core->new_parent && core->new_parent != core->parent) {
 		old_parent = __clk_set_parent_before(core, core->new_parent);
+		core->old_parent = old_parent;
 		trace_clk_set_parent(core, core->new_parent);
 
 		if (core->ops->set_rate_and_parent) {
@@ -2144,10 +2142,6 @@ static int clk_change_rate(struct clk_core *core)
 
 	trace_clk_set_rate_complete(core, core->new_rate);
 
-	/* Release vdd requirements for old frequency. */
-	if (core->prepare_count)
-		clk_unvote_rate_vdd(core, old_rate);
-
 	core->rate = clk_recalc(core, best_parent_rate);
 
 	if (core->flags & CLK_SET_RATE_UNGATE) {
@@ -2185,13 +2179,7 @@ static int clk_change_rate(struct clk_core *core)
 	if (core->new_child)
 		rc = clk_change_rate(core->new_child);
 
-	clk_pm_runtime_put(core);
-	return rc;
-
 err_set_rate:
-	if (core->prepare_count)
-		clk_unvote_rate_vdd(core, core->new_rate);
-out:
 	clk_pm_runtime_put(core);
 	return rc;
 }
@@ -2223,6 +2211,74 @@ static unsigned long clk_core_req_round_rate_nolock(struct clk_core *core,
 	return ret ? 0 : req.rate;
 }
 
+static int vote_vdd_up(struct clk_core *core)
+{
+	struct clk_core *parent = NULL;
+	int ret, cur_level, next_level;
+
+	/* sanity */
+	if (IS_ERR_OR_NULL(core))
+		return 0;
+
+	if (core->vdd_class) {
+		cur_level = clk_find_vdd_level(core, core->rate);
+		next_level = clk_find_vdd_level(core, core->new_rate);
+		if (cur_level == next_level)
+			return 0;
+	}
+
+	/* pick the parent whose vote must also be raised, if one exists */
+	if (core->new_parent)
+		parent = core->new_parent;
+	else if (core->parent)
+		parent = core->parent;
+
+	if (core->prepare_count && core->new_rate) {
+		ret = clk_vote_rate_vdd(core, core->new_rate);
+		if (ret)
+			return ret;
+	}
+
+	vote_vdd_up(parent);
+
+	return 0;
+}
+
+static int vote_vdd_down(struct clk_core *core)
+{
+	struct clk_core *parent;
+	unsigned long rate;
+	int cur_level, old_level;
+
+	/* sanity */
+	if (IS_ERR_OR_NULL(core))
+		return 0;
+
+	rate = core->old_rate;
+
+	/* If the rate change didn't take effect, unvote new_rate instead */
+	if (DIV_ROUND_CLOSEST(core->rate, 1000) !=
+		DIV_ROUND_CLOSEST(core->new_rate, 1000))
+		rate = core->new_rate;
+
+	if (core->vdd_class) {
+		cur_level = clk_find_vdd_level(core, core->rate);
+		old_level = clk_find_vdd_level(core, core->old_rate);
+		if ((cur_level == old_level)
+			|| !core->vdd_class->level_votes[old_level])
+			return 0;
+	}
+
+	parent = core->old_parent;
+
+	if (core->prepare_count && rate)
+		clk_unvote_rate_vdd(core, rate);
+
+	vote_vdd_down(parent);
+
+	return 0;
+}
+
 static int clk_core_set_rate_nolock(struct clk_core *core,
 				    unsigned long req_rate)
 {
@@ -2262,16 +2318,26 @@ static int clk_core_set_rate_nolock(struct clk_core *core,
 		goto err;
 	}
 
+	/* Enforce the VDD for new frequency */
+	ret = vote_vdd_up(core);
+	if (ret)
+		goto err;
+
 	/* change the rates */
 	ret = clk_change_rate(top);
 	if (ret) {
 		pr_err("%s: failed to set %s clock to run at %lu\n", __func__,
 				top->name, req_rate);
 		clk_propagate_rate_change(top, ABORT_RATE_CHANGE);
-		return ret;
+		/* Release vdd requirements for new frequency. */
+		vote_vdd_down(core);
+		goto err;
 	}
 
 	core->req_rate = req_rate;
+	/* Release vdd requirements for old frequency. */
+	vote_vdd_down(core);
+
 err:
 	clk_pm_runtime_put(core);
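
The net effect of the clk.c changes above is a make-before-break order
for the voltage vote: the vdd corner for the new frequency is voted,
recursively up the parent chain, before the rate is switched, and the
vote for the old frequency (or for the new one on failure) is released
afterwards. In outline, as a restatement of the code above rather than
new logic:

	ret = vote_vdd_up(core);	/* vote corner for new_rate first */
	if (ret)
		goto err;
	ret = clk_change_rate(top);	/* then switch the frequency */
	if (ret) {
		vote_vdd_down(core);	/* failure: drop the new_rate vote */
		goto err;
	}
	core->req_rate = req_rate;
	vote_vdd_down(core);		/* success: drop the old_rate vote */
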
 
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index 1243e39..f21fedb 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -347,3 +347,13 @@
 	  KONA devices.
 	  Say Y if you want to enable use of the Network Processing Unit in
 	  order to speed up certain types of calculations.
+
+config SM_GCC_LITO
+	tristate "LITO Global Clock Controller"
+	depends on COMMON_CLK_QCOM
+	select QCOM_GDSC
+	help
+	  Support for the global clock controller on Qualcomm Technologies, Inc.
+	  LITO devices.
+	  Say Y if you want to use peripheral devices such as UART, SPI, I2C,
+	  USB, UFS, SD/eMMC, PCIe, etc.
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index 8104446..4437ab3 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -52,6 +52,7 @@
 obj-$(CONFIG_SDM_DISPCC_845) += dispcc-sdm845.o
 obj-$(CONFIG_SDM_GCC_845) += gcc-sdm845.o
 obj-$(CONFIG_SDM_VIDEOCC_845) += videocc-sdm845.o
+obj-$(CONFIG_SM_GCC_LITO) += gcc-lito.o
 obj-$(CONFIG_SPMI_PMIC_CLKDIV) += clk-spmi-pmic-div.o
 
 obj-y += mdss/
diff --git a/drivers/clk/qcom/clk-debug.c b/drivers/clk/qcom/clk-debug.c
index 38d2422..7169fd2 100644
--- a/drivers/clk/qcom/clk-debug.c
+++ b/drivers/clk/qcom/clk-debug.c
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved. */
+/* Copyright (c) 2016, 2019, The Linux Foundation. All rights reserved. */
 
 #include <linux/clk.h>
 #include <linux/export.h>
@@ -269,7 +269,7 @@ void clk_debug_measure_add(struct clk_hw *hw, struct dentry *dentry)
 		return;
 	}
 
-	debugfs_create_file("clk_measure", 0x444, dentry, hw,
+	debugfs_create_file("clk_measure", 0444, dentry, hw,
 					&clk_measure_fops);
 }
 EXPORT_SYMBOL(clk_debug_measure_add);
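
The clk-debug.c change fixes a hex-for-octal mode slip: C file-mode
constants are octal, so the two literals mean very different things, as
the arithmetic shows:

	/* 0444  == octal 0444          -> r--r--r--                 (intended)
	 * 0x444 == 1092 == octal 02104 -> S_ISGID|S_IXUSR|S_IROTH   (bug)
	 */
	debugfs_create_file("clk_measure", 0444, dentry, hw,
			    &clk_measure_fops);
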
diff --git a/drivers/clk/qcom/gcc-lito.c b/drivers/clk/qcom/gcc-lito.c
new file mode 100644
index 0000000..c0e960d
--- /dev/null
+++ b/drivers/clk/qcom/gcc-lito.c
@@ -0,0 +1,2805 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "clk: %s: " fmt, __func__
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
+
+#include <dt-bindings/clock/qcom,gcc-lito.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+#include "vdd-level.h"
+
+#define GCC_NPU_MISC				0x4d110
+#define GCC_GPU_MISC				0x71028
+
+static DEFINE_VDD_REGULATORS(vdd_cx, VDD_NUM, 1, vdd_corner);
+static DEFINE_VDD_REGULATORS(vdd_cx_ao, VDD_NUM, 1, vdd_corner);
+
+enum {
+	P_BI_TCXO,
+	P_CORE_BI_PLL_TEST_SE,
+	P_GPLL0_OUT_EVEN,
+	P_GPLL0_OUT_MAIN,
+	P_GPLL10_OUT_MAIN,
+	P_GPLL1_OUT_MAIN,
+	P_GPLL4_OUT_MAIN,
+	P_GPLL6_OUT_MAIN,
+	P_GPLL9_OUT_MAIN,
+	P_SLEEP_CLK,
+};
+
+static const struct parent_map gcc_parent_map_0[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_GPLL0_OUT_MAIN, 1 },
+	{ P_GPLL0_OUT_EVEN, 6 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
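+
+/*
+ * Each parent_map entry pairs a logical parent (a P_* enum value) with
+ * the mux select value programmed into the RCG CFG register; the
+ * matching gcc_parent_names_N array lists the parent clock names in
+ * the same order, which is how the framework resolves them by name.
+ */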
+
+static const char * const gcc_parent_names_0[] = {
+	"bi_tcxo",
+	"gpll0",
+	"gpll0_out_even",
+	"core_bi_pll_test_se",
+};
+
+static const char * const gcc_parent_names_0_ao[] = {
+	"bi_tcxo_ao",
+	"gpll0",
+	"gpll0_out_even",
+	"core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_1[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_GPLL0_OUT_MAIN, 1 },
+	{ P_SLEEP_CLK, 5 },
+	{ P_GPLL0_OUT_EVEN, 6 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_1[] = {
+	"bi_tcxo",
+	"gpll0",
+	"sleep_clk",
+	"gpll0_out_even",
+	"core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_2[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_2[] = {
+	"bi_tcxo",
+	"core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_3[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_GPLL0_OUT_MAIN, 1 },
+	{ P_GPLL10_OUT_MAIN, 2 },
+	{ P_GPLL1_OUT_MAIN, 4 },
+	{ P_GPLL4_OUT_MAIN, 5 },
+	{ P_GPLL0_OUT_EVEN, 6 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_3[] = {
+	"bi_tcxo",
+	"gpll0",
+	"gpll10",
+	"gpll1",
+	"gpll4",
+	"gpll0_out_even",
+	"core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_4[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_GPLL0_OUT_MAIN, 1 },
+	{ P_GPLL6_OUT_MAIN, 2 },
+	{ P_GPLL0_OUT_EVEN, 6 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_4[] = {
+	"bi_tcxo",
+	"gpll0",
+	"gpll6",
+	"gpll0_out_even",
+	"core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_5[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_GPLL0_OUT_MAIN, 1 },
+	{ P_GPLL9_OUT_MAIN, 2 },
+	{ P_GPLL0_OUT_EVEN, 6 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_5[] = {
+	"bi_tcxo",
+	"gpll0",
+	"gpll9",
+	"gpll0_out_even",
+	"core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_6[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_SLEEP_CLK, 5 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_6[] = {
+	"bi_tcxo",
+	"sleep_clk",
+	"core_bi_pll_test_se",
+};
+
+static struct pll_vco lucid_vco[] = {
+	{ 249600000, 2000000000, 0 },
+};
+
+static struct clk_alpha_pll gpll0 = {
+	.offset = 0x0,
+	.vco_table = lucid_vco,
+	.num_vco = ARRAY_SIZE(lucid_vco),
+	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+	.clkr = {
+		.enable_reg = 0x52010,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gpll0",
+			.parent_names = (const char *[]){ "bi_tcxo" },
+			.num_parents = 1,
+			.ops = &clk_alpha_pll_fixed_lucid_ops,
+			.vdd_class = &vdd_cx,
+			.num_rate_max = VDD_NUM,
+			.rate_max = (unsigned long[VDD_NUM]) {
+				[VDD_MIN] = 615000000,
+				[VDD_LOW] = 1066000000,
+				[VDD_LOW_L1] = 1600000000,
+				[VDD_NOMINAL] = 2000000000},
+		},
+	},
+};
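+
+/*
+ * Note: vdd_class together with the rate_max table lets the framework
+ * vote the lowest vdd_cx corner that supports a requested frequency;
+ * a rate is served at the first level whose rate_max is >= that rate.
+ */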
+
+static const struct clk_div_table post_div_table_gpll0_out_even[] = {
+	{ 0x1, 2 },
+	{ }
+};
+
+static struct clk_alpha_pll_postdiv gpll0_out_even = {
+	.offset = 0x0,
+	.post_div_shift = 8,
+	.post_div_table = post_div_table_gpll0_out_even,
+	.num_post_div = ARRAY_SIZE(post_div_table_gpll0_out_even),
+	.width = 4,
+	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gpll0_out_even",
+		.parent_names = (const char *[]){ "gpll0" },
+		.num_parents = 1,
+		.ops = &clk_alpha_pll_postdiv_lucid_ops,
+	},
+};
+
+static struct clk_alpha_pll gpll6 = {
+	.offset = 0x13000,
+	.vco_table = lucid_vco,
+	.num_vco = ARRAY_SIZE(lucid_vco),
+	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+	.clkr = {
+		.enable_reg = 0x52010,
+		.enable_mask = BIT(6),
+		.hw.init = &(struct clk_init_data){
+			.name = "gpll6",
+			.parent_names = (const char *[]){ "bi_tcxo" },
+			.num_parents = 1,
+			.ops = &clk_alpha_pll_fixed_lucid_ops,
+			.vdd_class = &vdd_cx,
+			.num_rate_max = VDD_NUM,
+			.rate_max = (unsigned long[VDD_NUM]) {
+				[VDD_MIN] = 615000000,
+				[VDD_LOW] = 1066000000,
+				[VDD_LOW_L1] = 1600000000,
+				[VDD_NOMINAL] = 2000000000},
+		},
+	},
+};
+
+static struct clk_alpha_pll gpll9 = {
+	.offset = 0x1c000,
+	.vco_table = lucid_vco,
+	.num_vco = ARRAY_SIZE(lucid_vco),
+	.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+	.clkr = {
+		.enable_reg = 0x52010,
+		.enable_mask = BIT(9),
+		.hw.init = &(struct clk_init_data){
+			.name = "gpll9",
+			.parent_names = (const char *[]){ "bi_tcxo" },
+			.num_parents = 1,
+			.ops = &clk_alpha_pll_fixed_lucid_ops,
+			.vdd_class = &vdd_cx,
+			.num_rate_max = VDD_NUM,
+			.rate_max = (unsigned long[VDD_NUM]) {
+				[VDD_MIN] = 615000000,
+				[VDD_LOW] = 1066000000,
+				[VDD_LOW_L1] = 1600000000,
+				[VDD_NOMINAL] = 2000000000},
+		},
+	},
+};
+
+static const struct freq_tbl ftbl_gcc_cpuss_ahb_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 gcc_cpuss_ahb_clk_src = {
+	.cmd_rcgr = 0x48010,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_0,
+	.freq_tbl = ftbl_gcc_cpuss_ahb_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gcc_cpuss_ahb_clk_src",
+		.parent_names = gcc_parent_names_0_ao,
+		.num_parents = 4,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx_ao,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 19200000,
+			[VDD_LOW] = 50000000,
+			[VDD_NOMINAL] = 100000000},
+	},
+};
+
+static const struct freq_tbl ftbl_gcc_dpm_clk_src[] = {
+	F(200000000, P_GPLL0_OUT_EVEN, 1.5, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 gcc_dpm_clk_src = {
+	.cmd_rcgr = 0x4600c,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_0,
+	.freq_tbl = ftbl_gcc_dpm_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gcc_dpm_clk_src",
+		.parent_names = gcc_parent_names_0,
+		.num_parents = 4,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 100000000,
+			[VDD_LOW] = 150000000,
+			[VDD_LOW_L1] = 200000000},
+	},
+};
+
+static const struct freq_tbl ftbl_gcc_gp1_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0),
+	F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
+	F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+	F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 gcc_gp1_clk_src = {
+	.cmd_rcgr = 0x64004,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_1,
+	.freq_tbl = ftbl_gcc_gp1_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gcc_gp1_clk_src",
+		.parent_names = gcc_parent_names_1,
+		.num_parents = 5,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 50000000,
+			[VDD_LOW] = 100000000,
+			[VDD_NOMINAL] = 200000000},
+	},
+};
+
+static struct clk_rcg2 gcc_gp2_clk_src = {
+	.cmd_rcgr = 0x65004,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_1,
+	.freq_tbl = NULL,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gcc_gp2_clk_src",
+		.parent_names = gcc_parent_names_1,
+		.num_parents = 5,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 50000000,
+			[VDD_LOW] = 100000000,
+			[VDD_NOMINAL] = 200000000},
+	},
+};
+
+static struct clk_rcg2 gcc_gp3_clk_src = {
+	.cmd_rcgr = 0x66004,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_1,
+	.freq_tbl = NULL,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gcc_gp3_clk_src",
+		.parent_names = gcc_parent_names_1,
+		.num_parents = 5,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 50000000,
+			[VDD_LOW] = 100000000,
+			[VDD_NOMINAL] = 200000000},
+	},
+};
+
+static struct clk_init_data gcc_npu_dma_clk_init = {
+	.name = "gcc_npu_dma_clk_src",
+	.parent_names = gcc_parent_names_3,
+	.num_parents = 7,
+	.ops = &clk_rcg2_ops,
+	.vdd_class = &vdd_cx,
+	.num_rate_max = VDD_NUM,
+	.rate_max = (unsigned long[VDD_NUM]) {
+		[VDD_LOWER] = 300000000,
+		[VDD_LOW] = 403000000,
+		[VDD_LOW_L1] = 533000000,
+		[VDD_NOMINAL] = 710666667,
+		[VDD_HIGH] = 806000000},
+};
+
+static struct clk_rcg2 gcc_npu_dma_clk_src = {
+	.cmd_rcgr = 0x4d01c,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_3,
+	.clkr.hw.init = &gcc_npu_dma_clk_init,
+};
+
+static const struct freq_tbl ftbl_gcc_pdm2_clk_src[] = {
+	F(9600000, P_BI_TCXO, 2, 0, 0),
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(60000000, P_GPLL0_OUT_MAIN, 10, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 gcc_pdm2_clk_src = {
+	.cmd_rcgr = 0x33010,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_0,
+	.freq_tbl = ftbl_gcc_pdm2_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gcc_pdm2_clk_src",
+		.parent_names = gcc_parent_names_0,
+		.num_parents = 4,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 19200000,
+			[VDD_LOW] = 60000000},
+	},
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s0_clk_src[] = {
+	F(7372800, P_GPLL0_OUT_EVEN, 1, 384, 15625),
+	F(14745600, P_GPLL0_OUT_EVEN, 1, 768, 15625),
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(29491200, P_GPLL0_OUT_EVEN, 1, 1536, 15625),
+	F(32000000, P_GPLL0_OUT_EVEN, 1, 8, 75),
+	F(48000000, P_GPLL0_OUT_EVEN, 1, 4, 25),
+	F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
+	F(64000000, P_GPLL0_OUT_EVEN, 1, 16, 75),
+	F(75000000, P_GPLL0_OUT_EVEN, 4, 0, 0),
+	F(80000000, P_GPLL0_OUT_EVEN, 1, 4, 15),
+	F(96000000, P_GPLL0_OUT_EVEN, 1, 8, 25),
+	F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+	F(102400000, P_GPLL0_OUT_EVEN, 1, 128, 375),
+	F(112000000, P_GPLL0_OUT_EVEN, 1, 28, 75),
+	F(117964800, P_GPLL0_OUT_EVEN, 1, 6144, 15625),
+	F(120000000, P_GPLL0_OUT_EVEN, 2.5, 0, 0),
+	F(128000000, P_GPLL0_OUT_MAIN, 1, 16, 75),
+	{ }
+};
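+
+/*
+ * F(rate, src, pre_div, m, n) encodes one RCG frequency as
+ * rate = src_rate / pre_div * m / n, where m/n of 0/0 means the MND
+ * divider is bypassed. E.g. the first row above: gpll0_out_even runs
+ * at 300 MHz, and 300 MHz * 384 / 15625 = 7.3728 MHz, i.e. 64 x
+ * 115200, a standard UART baud multiple. Half-integer pre-dividers
+ * such as 2.5 are supported by the RCG hardware.
+ */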
+
+static struct clk_init_data gcc_qupv3_wrap0_s0_clk_init = {
+	.name = "gcc_qupv3_wrap0_s0_clk_src",
+	.parent_names = gcc_parent_names_0,
+	.num_parents = 4,
+	.ops = &clk_rcg2_ops,
+	.vdd_class = &vdd_cx,
+	.num_rate_max = VDD_NUM,
+	.rate_max = (unsigned long[VDD_NUM]) {
+		[VDD_LOWER] = 75000000,
+		[VDD_LOW] = 100000000,
+		[VDD_NOMINAL] = 128000000},
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s0_clk_src = {
+	.cmd_rcgr = 0x17010,
+	.mnd_width = 16,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_0,
+	.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+	.clkr.hw.init = &gcc_qupv3_wrap0_s0_clk_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s1_clk_init = {
+	.name = "gcc_qupv3_wrap0_s1_clk_src",
+	.parent_names = gcc_parent_names_0,
+	.num_parents = 4,
+	.ops = &clk_rcg2_ops,
+	.vdd_class = &vdd_cx,
+	.num_rate_max = VDD_NUM,
+	.rate_max = (unsigned long[VDD_NUM]) {
+		[VDD_LOWER] = 75000000,
+		[VDD_LOW] = 100000000,
+		[VDD_NOMINAL] = 128000000},
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s1_clk_src = {
+	.cmd_rcgr = 0x17140,
+	.mnd_width = 16,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_0,
+	.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+	.clkr.hw.init = &gcc_qupv3_wrap0_s1_clk_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s2_clk_init = {
+	.name = "gcc_qupv3_wrap0_s2_clk_src",
+	.parent_names = gcc_parent_names_0,
+	.num_parents = 4,
+	.ops = &clk_rcg2_ops,
+	.vdd_class = &vdd_cx,
+	.num_rate_max = VDD_NUM,
+	.rate_max = (unsigned long[VDD_NUM]) {
+		[VDD_LOWER] = 75000000,
+		[VDD_LOW] = 100000000,
+		[VDD_NOMINAL] = 128000000},
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s2_clk_src = {
+	.cmd_rcgr = 0x17270,
+	.mnd_width = 16,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_0,
+	.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+	.clkr.hw.init = &gcc_qupv3_wrap0_s2_clk_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s3_clk_init = {
+	.name = "gcc_qupv3_wrap0_s3_clk_src",
+	.parent_names = gcc_parent_names_0,
+	.num_parents = 4,
+	.ops = &clk_rcg2_ops,
+	.vdd_class = &vdd_cx,
+	.num_rate_max = VDD_NUM,
+	.rate_max = (unsigned long[VDD_NUM]) {
+		[VDD_LOWER] = 75000000,
+		[VDD_LOW] = 100000000,
+		[VDD_NOMINAL] = 128000000},
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s3_clk_src = {
+	.cmd_rcgr = 0x173a0,
+	.mnd_width = 16,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_0,
+	.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+	.clkr.hw.init = &gcc_qupv3_wrap0_s3_clk_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s4_clk_init = {
+	.name = "gcc_qupv3_wrap0_s4_clk_src",
+	.parent_names = gcc_parent_names_0,
+	.num_parents = 4,
+	.ops = &clk_rcg2_ops,
+	.vdd_class = &vdd_cx,
+	.num_rate_max = VDD_NUM,
+	.rate_max = (unsigned long[VDD_NUM]) {
+		[VDD_LOWER] = 75000000,
+		[VDD_LOW] = 100000000,
+		[VDD_NOMINAL] = 128000000},
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s4_clk_src = {
+	.cmd_rcgr = 0x174d0,
+	.mnd_width = 16,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_0,
+	.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+	.clkr.hw.init = &gcc_qupv3_wrap0_s4_clk_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s5_clk_init = {
+	.name = "gcc_qupv3_wrap0_s5_clk_src",
+	.parent_names = gcc_parent_names_0,
+	.num_parents = 4,
+	.ops = &clk_rcg2_ops,
+	.vdd_class = &vdd_cx,
+	.num_rate_max = VDD_NUM,
+	.rate_max = (unsigned long[VDD_NUM]) {
+		[VDD_LOWER] = 75000000,
+		[VDD_LOW] = 100000000,
+		[VDD_NOMINAL] = 128000000},
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s5_clk_src = {
+	.cmd_rcgr = 0x17600,
+	.mnd_width = 16,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_0,
+	.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+	.clkr.hw.init = &gcc_qupv3_wrap0_s5_clk_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s0_clk_init = {
+	.name = "gcc_qupv3_wrap1_s0_clk_src",
+	.parent_names = gcc_parent_names_0,
+	.num_parents = 4,
+	.ops = &clk_rcg2_ops,
+	.vdd_class = &vdd_cx,
+	.num_rate_max = VDD_NUM,
+	.rate_max = (unsigned long[VDD_NUM]) {
+		[VDD_LOWER] = 75000000,
+		[VDD_LOW] = 100000000,
+		[VDD_NOMINAL] = 128000000},
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s0_clk_src = {
+	.cmd_rcgr = 0x18010,
+	.mnd_width = 16,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_0,
+	.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+	.clkr.hw.init = &gcc_qupv3_wrap1_s0_clk_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s1_clk_init = {
+	.name = "gcc_qupv3_wrap1_s1_clk_src",
+	.parent_names = gcc_parent_names_0,
+	.num_parents = 4,
+	.ops = &clk_rcg2_ops,
+	.vdd_class = &vdd_cx,
+	.num_rate_max = VDD_NUM,
+	.rate_max = (unsigned long[VDD_NUM]) {
+		[VDD_LOWER] = 75000000,
+		[VDD_LOW] = 100000000,
+		[VDD_NOMINAL] = 128000000},
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s1_clk_src = {
+	.cmd_rcgr = 0x18140,
+	.mnd_width = 16,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_0,
+	.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+	.clkr.hw.init = &gcc_qupv3_wrap1_s1_clk_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s2_clk_init = {
+	.name = "gcc_qupv3_wrap1_s2_clk_src",
+	.parent_names = gcc_parent_names_0,
+	.num_parents = 4,
+	.ops = &clk_rcg2_ops,
+	.vdd_class = &vdd_cx,
+	.num_rate_max = VDD_NUM,
+	.rate_max = (unsigned long[VDD_NUM]) {
+		[VDD_LOWER] = 75000000,
+		[VDD_LOW] = 100000000,
+		[VDD_NOMINAL] = 128000000},
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s2_clk_src = {
+	.cmd_rcgr = 0x18270,
+	.mnd_width = 16,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_0,
+	.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+	.clkr.hw.init = &gcc_qupv3_wrap1_s2_clk_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s3_clk_init = {
+	.name = "gcc_qupv3_wrap1_s3_clk_src",
+	.parent_names = gcc_parent_names_0,
+	.num_parents = 4,
+	.ops = &clk_rcg2_ops,
+	.vdd_class = &vdd_cx,
+	.num_rate_max = VDD_NUM,
+	.rate_max = (unsigned long[VDD_NUM]) {
+		[VDD_LOWER] = 75000000,
+		[VDD_LOW] = 100000000,
+		[VDD_NOMINAL] = 128000000},
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s3_clk_src = {
+	.cmd_rcgr = 0x183a0,
+	.mnd_width = 16,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_0,
+	.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+	.clkr.hw.init = &gcc_qupv3_wrap1_s3_clk_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s4_clk_init = {
+	.name = "gcc_qupv3_wrap1_s4_clk_src",
+	.parent_names = gcc_parent_names_0,
+	.num_parents = 4,
+	.ops = &clk_rcg2_ops,
+	.vdd_class = &vdd_cx,
+	.num_rate_max = VDD_NUM,
+	.rate_max = (unsigned long[VDD_NUM]) {
+		[VDD_LOWER] = 75000000,
+		[VDD_LOW] = 100000000,
+		[VDD_NOMINAL] = 128000000},
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s4_clk_src = {
+	.cmd_rcgr = 0x184d0,
+	.mnd_width = 16,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_0,
+	.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+	.clkr.hw.init = &gcc_qupv3_wrap1_s4_clk_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s5_clk_init = {
+	.name = "gcc_qupv3_wrap1_s5_clk_src",
+	.parent_names = gcc_parent_names_0,
+	.num_parents = 4,
+	.ops = &clk_rcg2_ops,
+	.vdd_class = &vdd_cx,
+	.num_rate_max = VDD_NUM,
+	.rate_max = (unsigned long[VDD_NUM]) {
+		[VDD_LOWER] = 75000000,
+		[VDD_LOW] = 100000000,
+		[VDD_NOMINAL] = 128000000},
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s5_clk_src = {
+	.cmd_rcgr = 0x18600,
+	.mnd_width = 16,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_0,
+	.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+	.clkr.hw.init = &gcc_qupv3_wrap1_s5_clk_init,
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc1_apps_clk_src[] = {
+	F(144000, P_BI_TCXO, 16, 3, 25),
+	F(400000, P_BI_TCXO, 12, 1, 4),
+	F(20000000, P_GPLL0_OUT_EVEN, 5, 1, 3),
+	F(25000000, P_GPLL0_OUT_EVEN, 6, 1, 2),
+	F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
+	F(100000000, P_GPLL0_OUT_EVEN, 3, 0, 0),
+	F(192000000, P_GPLL6_OUT_MAIN, 2, 0, 0),
+	F(384000000, P_GPLL6_OUT_MAIN, 1, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 gcc_sdcc1_apps_clk_src = {
+	.cmd_rcgr = 0x26024,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_4,
+	.freq_tbl = ftbl_gcc_sdcc1_apps_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gcc_sdcc1_apps_clk_src",
+		.parent_names = gcc_parent_names_4,
+		.num_parents = 5,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 100000000,
+			[VDD_LOW_L1] = 384000000},
+	},
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc1_ice_core_clk_src[] = {
+	F(100000000, P_GPLL0_OUT_EVEN, 3, 0, 0),
+	F(150000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+	F(300000000, P_GPLL0_OUT_MAIN, 2, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 gcc_sdcc1_ice_core_clk_src = {
+	.cmd_rcgr = 0x2600c,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_0,
+	.freq_tbl = ftbl_gcc_sdcc1_ice_core_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gcc_sdcc1_ice_core_clk_src",
+		.parent_names = gcc_parent_names_0,
+		.num_parents = 4,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 100000000,
+			[VDD_LOW] = 150000000,
+			[VDD_LOW_L1] = 300000000},
+	},
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc2_apps_clk_src[] = {
+	F(400000, P_BI_TCXO, 12, 1, 4),
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(25000000, P_GPLL0_OUT_EVEN, 6, 1, 2),
+	F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
+	F(100000000, P_GPLL0_OUT_EVEN, 3, 0, 0),
+	F(202000000, P_GPLL9_OUT_MAIN, 4, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 gcc_sdcc2_apps_clk_src = {
+	.cmd_rcgr = 0x1400c,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_5,
+	.freq_tbl = ftbl_gcc_sdcc2_apps_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gcc_sdcc2_apps_clk_src",
+		.parent_names = gcc_parent_names_5,
+		.num_parents = 5,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 100000000,
+			[VDD_LOW_L1] = 202000000},
+	},
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc4_apps_clk_src[] = {
+	F(400000, P_BI_TCXO, 12, 1, 4),
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(25000000, P_GPLL0_OUT_EVEN, 6, 1, 2),
+	F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
+	F(100000000, P_GPLL0_OUT_EVEN, 3, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 gcc_sdcc4_apps_clk_src = {
+	.cmd_rcgr = 0x1600c,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_0,
+	.freq_tbl = ftbl_gcc_sdcc4_apps_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gcc_sdcc4_apps_clk_src",
+		.parent_names = gcc_parent_names_0,
+		.num_parents = 4,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 100000000},
+	},
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_axi_clk_src[] = {
+	F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0),
+	F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
+	F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+	F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+	F(240000000, P_GPLL0_OUT_MAIN, 2.5, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_axi_clk_src = {
+	.cmd_rcgr = 0x77024,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_0,
+	.freq_tbl = ftbl_gcc_ufs_phy_axi_clk_src,
+	.enable_safe_config = true,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gcc_ufs_phy_axi_clk_src",
+		.parent_names = gcc_parent_names_0,
+		.num_parents = 4,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 50000000,
+			[VDD_LOW] = 100000000,
+			[VDD_NOMINAL] = 200000000,
+			[VDD_HIGH] = 240000000},
+	},
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_ice_core_clk_src[] = {
+	F(37500000, P_GPLL0_OUT_EVEN, 8, 0, 0),
+	F(75000000, P_GPLL0_OUT_EVEN, 4, 0, 0),
+	F(150000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+	F(300000000, P_GPLL0_OUT_MAIN, 2, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_ice_core_clk_src = {
+	.cmd_rcgr = 0x7706c,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_0,
+	.freq_tbl = ftbl_gcc_ufs_phy_ice_core_clk_src,
+	.enable_safe_config = true,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gcc_ufs_phy_ice_core_clk_src",
+		.parent_names = gcc_parent_names_0,
+		.num_parents = 4,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 75000000,
+			[VDD_LOW] = 150000000,
+			[VDD_NOMINAL] = 300000000},
+	},
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_phy_aux_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_phy_aux_clk_src = {
+	.cmd_rcgr = 0x770a0,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_2,
+	.freq_tbl = ftbl_gcc_ufs_phy_phy_aux_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gcc_ufs_phy_phy_aux_clk_src",
+		.parent_names = gcc_parent_names_2,
+		.num_parents = 2,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 19200000},
+	},
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_unipro_core_clk_src[] = {
+	F(37500000, P_GPLL0_OUT_EVEN, 8, 0, 0),
+	F(75000000, P_GPLL0_OUT_MAIN, 8, 0, 0),
+	F(150000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_unipro_core_clk_src = {
+	.cmd_rcgr = 0x77084,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_0,
+	.freq_tbl = ftbl_gcc_ufs_phy_unipro_core_clk_src,
+	.enable_safe_config = true,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gcc_ufs_phy_unipro_core_clk_src",
+		.parent_names = gcc_parent_names_0,
+		.num_parents = 4,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 37500000,
+			[VDD_LOW] = 75000000,
+			[VDD_NOMINAL] = 150000000},
+	},
+};
+
+static const struct freq_tbl ftbl_gcc_usb30_prim_master_clk_src[] = {
+	F(66666667, P_GPLL0_OUT_EVEN, 4.5, 0, 0),
+	F(133333333, P_GPLL0_OUT_MAIN, 4.5, 0, 0),
+	F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+	F(240000000, P_GPLL0_OUT_MAIN, 2.5, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 gcc_usb30_prim_master_clk_src = {
+	.cmd_rcgr = 0xf020,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_0,
+	.freq_tbl = ftbl_gcc_usb30_prim_master_clk_src,
+	.enable_safe_config = true,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gcc_usb30_prim_master_clk_src",
+		.parent_names = gcc_parent_names_0,
+		.num_parents = 4,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 66666667,
+			[VDD_LOW] = 133333333,
+			[VDD_NOMINAL] = 200000000,
+			[VDD_HIGH] = 240000000},
+	},
+};
+
+static const struct freq_tbl ftbl_gcc_usb30_prim_mock_utmi_clk_src[] = {
+	F(60000000, P_GPLL0_OUT_EVEN, 5, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 gcc_usb30_prim_mock_utmi_clk_src = {
+	.cmd_rcgr = 0xf038,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_0,
+	.freq_tbl = ftbl_gcc_usb30_prim_mock_utmi_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gcc_usb30_prim_mock_utmi_clk_src",
+		.parent_names = gcc_parent_names_0,
+		.num_parents = 4,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 60000000},
+	},
+};
+
+static struct clk_rcg2 gcc_usb3_prim_phy_aux_clk_src = {
+	.cmd_rcgr = 0xf064,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = gcc_parent_map_6,
+	.freq_tbl = ftbl_gcc_ufs_phy_phy_aux_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "gcc_usb3_prim_phy_aux_clk_src",
+		.parent_names = gcc_parent_names_6,
+		.num_parents = 3,
+		.ops = &clk_rcg2_ops,
+		.vdd_class = &vdd_cx,
+		.num_rate_max = VDD_NUM,
+		.rate_max = (unsigned long[VDD_NUM]) {
+			[VDD_LOWER] = 19200000},
+	},
+};
+
+static struct clk_branch gcc_aggre_ufs_phy_axi_clk = {
+	.halt_reg = 0x770cc,
+	.halt_check = BRANCH_HALT,
+	.hwcg_reg = 0x770cc,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x770cc,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_aggre_ufs_phy_axi_clk",
+			.parent_names = (const char *[]){
+				"gcc_ufs_phy_axi_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_aggre_usb3_prim_axi_clk = {
+	.halt_reg = 0xf080,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xf080,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_aggre_usb3_prim_axi_clk",
+			.parent_names = (const char *[]){
+				"gcc_usb30_prim_master_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_boot_rom_ahb_clk = {
+	.halt_reg = 0x38004,
+	.halt_check = BRANCH_HALT_VOTED,
+	.hwcg_reg = 0x38004,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x52000,
+		.enable_mask = BIT(10),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_boot_rom_ahb_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
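+
+/*
+ * Voted branch: the enable bit lives in a shared vote register
+ * (0x52000) rather than in the clock's own CBCR, so several masters
+ * can keep the clock running at once; BRANCH_HALT_VOTED makes the
+ * framework check halt status accordingly for this branch type.
+ */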
+
+static struct clk_branch gcc_camera_ahb_clk = {
+	.halt_reg = 0xb008,
+	.halt_check = BRANCH_HALT,
+	.hwcg_reg = 0xb008,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0xb008,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_camera_ahb_clk",
+			.flags = CLK_IS_CRITICAL,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_camera_hf_axi_clk = {
+	.halt_reg = 0xb028,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xb028,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_camera_hf_axi_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_camera_sf_axi_clk = {
+	.halt_reg = 0xb02c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xb02c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_camera_sf_axi_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_camera_throttle_hf_axi_clk = {
+	.halt_reg = 0xb074,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xb074,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_camera_throttle_hf_axi_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_camera_throttle_sf_axi_clk = {
+	.halt_reg = 0xb078,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xb078,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_camera_throttle_sf_axi_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_camera_xo_clk = {
+	.halt_reg = 0xb03c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xb03c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_camera_xo_clk",
+			.flags = CLK_IS_CRITICAL,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_cfg_noc_usb3_prim_axi_clk = {
+	.halt_reg = 0xf07c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xf07c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_cfg_noc_usb3_prim_axi_clk",
+			.parent_names = (const char *[]){
+				"gcc_usb30_prim_master_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_cpuss_ahb_clk = {
+	.halt_reg = 0x48000,
+	.halt_check = BRANCH_HALT_VOTED,
+	.clkr = {
+		.enable_reg = 0x52000,
+		.enable_mask = BIT(21),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_cpuss_ahb_clk",
+			.parent_names = (const char *[]){
+				"gcc_cpuss_ahb_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_cpuss_gnoc_clk = {
+	.halt_reg = 0x48064,
+	.halt_check = BRANCH_HALT_VOTED,
+	.hwcg_reg = 0x48064,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x52000,
+		.enable_mask = BIT(29),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_cpuss_gnoc_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_cpuss_rbcpr_clk = {
+	.halt_reg = 0x48004,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x48004,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_cpuss_rbcpr_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_ddrss_gpu_axi_clk = {
+	.halt_reg = 0x71154,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x71154,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_ddrss_gpu_axi_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_disp_ahb_clk = {
+	.halt_reg = 0xb00c,
+	.halt_check = BRANCH_HALT,
+	.hwcg_reg = 0xb00c,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0xb00c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_disp_ahb_clk",
+			.flags = CLK_IS_CRITICAL,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_disp_gpll0_clk_src = {
+	.halt_check = BRANCH_HALT_DELAY,
+	.clkr = {
+		.enable_reg = 0x52000,
+		.enable_mask = BIT(20),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_disp_gpll0_clk_src",
+			.parent_names = (const char *[]){
+				"gpll0",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_disp_hf_axi_clk = {
+	.halt_reg = 0xb030,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xb030,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_disp_hf_axi_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_disp_sf_axi_clk = {
+	.halt_reg = 0xb034,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xb034,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_disp_sf_axi_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_disp_throttle_hf_axi_clk = {
+	.halt_reg = 0xb06c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xb06c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_disp_throttle_hf_axi_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_disp_throttle_sf_axi_clk = {
+	.halt_reg = 0xb070,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xb070,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_disp_throttle_sf_axi_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_disp_xo_clk = {
+	.halt_reg = 0xb040,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xb040,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_disp_xo_clk",
+			.flags = CLK_IS_CRITICAL,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_dpm_ahb_clk = {
+	.halt_reg = 0x46008,
+	.halt_check = BRANCH_HALT,
+	.hwcg_reg = 0x46008,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x46008,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_dpm_ahb_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_dpm_clk = {
+	.halt_reg = 0x46004,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x46004,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_dpm_clk",
+			.parent_names = (const char *[]){
+				"gcc_dpm_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_gp1_clk = {
+	.halt_reg = 0x64000,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x64000,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_gp1_clk",
+			.parent_names = (const char *[]){
+				"gcc_gp1_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_gp2_clk = {
+	.halt_reg = 0x65000,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x65000,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_gp2_clk",
+			.parent_names = (const char *[]){
+				"gcc_gp2_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_gp3_clk = {
+	.halt_reg = 0x66000,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x66000,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_gp3_clk",
+			.parent_names = (const char *[]){
+				"gcc_gp3_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_gpu_cfg_ahb_clk = {
+	.halt_reg = 0x71004,
+	.halt_check = BRANCH_HALT,
+	.hwcg_reg = 0x71004,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x71004,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_gpu_cfg_ahb_clk",
+			.flags = CLK_IS_CRITICAL,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_gpu_gpll0_clk_src = {
+	.halt_check = BRANCH_HALT_DELAY,
+	.clkr = {
+		.enable_reg = 0x52000,
+		.enable_mask = BIT(15),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_gpu_gpll0_clk_src",
+			.parent_names = (const char *[]){
+				"gpll0",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_gpu_gpll0_div_clk_src = {
+	.halt_check = BRANCH_HALT_DELAY,
+	.clkr = {
+		.enable_reg = 0x52000,
+		.enable_mask = BIT(16),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_gpu_gpll0_div_clk_src",
+			.parent_names = (const char *[]){
+				"gpll0_out_even",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_gpu_iref_clk = {
+	.halt_reg = 0x8c014,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x8c014,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_gpu_iref_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_gpu_memnoc_gfx_clk = {
+	.halt_reg = 0x7100c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x7100c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_gpu_memnoc_gfx_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_gpu_snoc_dvm_gfx_clk = {
+	.halt_reg = 0x71018,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x71018,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_gpu_snoc_dvm_gfx_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_npu_axi_clk = {
+	.halt_reg = 0x4d008,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x4d008,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_npu_axi_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_npu_bwmon2_axi_clk = {
+	.halt_reg = 0x7000c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x7000c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_npu_bwmon2_axi_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_npu_bwmon_axi_clk = {
+	.halt_reg = 0x70008,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x70008,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_npu_bwmon_axi_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_npu_bwmon_cfg_ahb_clk = {
+	.halt_reg = 0x70004,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x70004,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_npu_bwmon_cfg_ahb_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_npu_cfg_ahb_clk = {
+	.halt_reg = 0x4d004,
+	.halt_check = BRANCH_HALT,
+	.hwcg_reg = 0x4d004,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x4d004,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_npu_cfg_ahb_clk",
+			.flags = CLK_IS_CRITICAL,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_npu_dma_clk = {
+	.halt_reg = 0x4d00c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x4d00c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_npu_dma_clk",
+			.parent_names = (const char *[]){
+				"gcc_npu_dma_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_npu_gpll0_clk_src = {
+	.halt_check = BRANCH_HALT_DELAY,
+	.clkr = {
+		.enable_reg = 0x52000,
+		.enable_mask = BIT(18),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_npu_gpll0_clk_src",
+			.parent_names = (const char *[]){
+				"gpll0",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_npu_gpll0_div_clk_src = {
+	.halt_check = BRANCH_HALT_DELAY,
+	.clkr = {
+		.enable_reg = 0x52000,
+		.enable_mask = BIT(19),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_npu_gpll0_div_clk_src",
+			.parent_names = (const char *[]){
+				"gpll0_out_even",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_pdm2_clk = {
+	.halt_reg = 0x3300c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x3300c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_pdm2_clk",
+			.parent_names = (const char *[]){
+				"gcc_pdm2_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_pdm_ahb_clk = {
+	.halt_reg = 0x33004,
+	.halt_check = BRANCH_HALT,
+	.hwcg_reg = 0x33004,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x33004,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_pdm_ahb_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_pdm_xo4_clk = {
+	.halt_reg = 0x33008,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x33008,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_pdm_xo4_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_prng_ahb_clk = {
+	.halt_reg = 0x34004,
+	.halt_check = BRANCH_HALT_VOTED,
+	.hwcg_reg = 0x34004,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x52000,
+		.enable_mask = BIT(13),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_prng_ahb_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_qmip_camera_nrt_ahb_clk = {
+	.halt_reg = 0xb018,
+	.halt_check = BRANCH_HALT,
+	.hwcg_reg = 0xb018,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0xb018,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_qmip_camera_nrt_ahb_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_qmip_camera_rt_ahb_clk = {
+	.halt_reg = 0xb01c,
+	.halt_check = BRANCH_HALT,
+	.hwcg_reg = 0xb01c,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0xb01c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_qmip_camera_rt_ahb_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_qmip_disp_ahb_clk = {
+	.halt_reg = 0xb020,
+	.halt_check = BRANCH_HALT,
+	.hwcg_reg = 0xb020,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0xb020,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_qmip_disp_ahb_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_qmip_rt_disp_ahb_clk = {
+	.halt_reg = 0xb07c,
+	.halt_check = BRANCH_HALT,
+	.hwcg_reg = 0xb07c,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0xb07c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_qmip_rt_disp_ahb_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_qmip_video_cvp_ahb_clk = {
+	.halt_reg = 0xb010,
+	.halt_check = BRANCH_HALT,
+	.hwcg_reg = 0xb010,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0xb010,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_qmip_video_cvp_ahb_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_qmip_video_vcodec_ahb_clk = {
+	.halt_reg = 0xb014,
+	.halt_check = BRANCH_HALT,
+	.hwcg_reg = 0xb014,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0xb014,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_qmip_video_vcodec_ahb_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_qupv3_wrap0_core_2x_clk = {
+	.halt_reg = 0x23008,
+	.halt_check = BRANCH_HALT_VOTED,
+	.clkr = {
+		.enable_reg = 0x52008,
+		.enable_mask = BIT(9),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_qupv3_wrap0_core_2x_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_qupv3_wrap0_core_clk = {
+	.halt_reg = 0x23000,
+	.halt_check = BRANCH_HALT_VOTED,
+	.clkr = {
+		.enable_reg = 0x52008,
+		.enable_mask = BIT(8),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_qupv3_wrap0_core_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s0_clk = {
+	.halt_reg = 0x1700c,
+	.halt_check = BRANCH_HALT_VOTED,
+	.clkr = {
+		.enable_reg = 0x52008,
+		.enable_mask = BIT(10),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_qupv3_wrap0_s0_clk",
+			.parent_names = (const char *[]){
+				"gcc_qupv3_wrap0_s0_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s1_clk = {
+	.halt_reg = 0x1713c,
+	.halt_check = BRANCH_HALT_VOTED,
+	.clkr = {
+		.enable_reg = 0x52008,
+		.enable_mask = BIT(11),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_qupv3_wrap0_s1_clk",
+			.parent_names = (const char *[]){
+				"gcc_qupv3_wrap0_s1_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s2_clk = {
+	.halt_reg = 0x1726c,
+	.halt_check = BRANCH_HALT_VOTED,
+	.clkr = {
+		.enable_reg = 0x52008,
+		.enable_mask = BIT(12),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_qupv3_wrap0_s2_clk",
+			.parent_names = (const char *[]){
+				"gcc_qupv3_wrap0_s2_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s3_clk = {
+	.halt_reg = 0x1739c,
+	.halt_check = BRANCH_HALT_VOTED,
+	.clkr = {
+		.enable_reg = 0x52008,
+		.enable_mask = BIT(13),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_qupv3_wrap0_s3_clk",
+			.parent_names = (const char *[]){
+				"gcc_qupv3_wrap0_s3_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s4_clk = {
+	.halt_reg = 0x174cc,
+	.halt_check = BRANCH_HALT_VOTED,
+	.clkr = {
+		.enable_reg = 0x52008,
+		.enable_mask = BIT(14),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_qupv3_wrap0_s4_clk",
+			.parent_names = (const char *[]){
+				"gcc_qupv3_wrap0_s4_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s5_clk = {
+	.halt_reg = 0x175fc,
+	.halt_check = BRANCH_HALT_VOTED,
+	.clkr = {
+		.enable_reg = 0x52008,
+		.enable_mask = BIT(15),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_qupv3_wrap0_s5_clk",
+			.parent_names = (const char *[]){
+				"gcc_qupv3_wrap0_s5_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_qupv3_wrap1_core_2x_clk = {
+	.halt_reg = 0x23140,
+	.halt_check = BRANCH_HALT_VOTED,
+	.clkr = {
+		.enable_reg = 0x52008,
+		.enable_mask = BIT(18),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_qupv3_wrap1_core_2x_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_qupv3_wrap1_core_clk = {
+	.halt_reg = 0x23138,
+	.halt_check = BRANCH_HALT_VOTED,
+	.clkr = {
+		.enable_reg = 0x52008,
+		.enable_mask = BIT(19),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_qupv3_wrap1_core_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s0_clk = {
+	.halt_reg = 0x1800c,
+	.halt_check = BRANCH_HALT_VOTED,
+	.clkr = {
+		.enable_reg = 0x52008,
+		.enable_mask = BIT(22),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_qupv3_wrap1_s0_clk",
+			.parent_names = (const char *[]){
+				"gcc_qupv3_wrap1_s0_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s1_clk = {
+	.halt_reg = 0x1813c,
+	.halt_check = BRANCH_HALT_VOTED,
+	.clkr = {
+		.enable_reg = 0x52008,
+		.enable_mask = BIT(23),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_qupv3_wrap1_s1_clk",
+			.parent_names = (const char *[]){
+				"gcc_qupv3_wrap1_s1_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s2_clk = {
+	.halt_reg = 0x1826c,
+	.halt_check = BRANCH_HALT_VOTED,
+	.clkr = {
+		.enable_reg = 0x52008,
+		.enable_mask = BIT(24),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_qupv3_wrap1_s2_clk",
+			.parent_names = (const char *[]){
+				"gcc_qupv3_wrap1_s2_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s3_clk = {
+	.halt_reg = 0x1839c,
+	.halt_check = BRANCH_HALT_VOTED,
+	.clkr = {
+		.enable_reg = 0x52008,
+		.enable_mask = BIT(25),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_qupv3_wrap1_s3_clk",
+			.parent_names = (const char *[]){
+				"gcc_qupv3_wrap1_s3_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s4_clk = {
+	.halt_reg = 0x184cc,
+	.halt_check = BRANCH_HALT_VOTED,
+	.clkr = {
+		.enable_reg = 0x52008,
+		.enable_mask = BIT(26),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_qupv3_wrap1_s4_clk",
+			.parent_names = (const char *[]){
+				"gcc_qupv3_wrap1_s4_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s5_clk = {
+	.halt_reg = 0x185fc,
+	.halt_check = BRANCH_HALT_VOTED,
+	.clkr = {
+		.enable_reg = 0x52008,
+		.enable_mask = BIT(27),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_qupv3_wrap1_s5_clk",
+			.parent_names = (const char *[]){
+				"gcc_qupv3_wrap1_s5_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_qupv3_wrap_0_m_ahb_clk = {
+	.halt_reg = 0x17004,
+	.halt_check = BRANCH_HALT_VOTED,
+	.clkr = {
+		.enable_reg = 0x52008,
+		.enable_mask = BIT(6),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_qupv3_wrap_0_m_ahb_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_qupv3_wrap_0_s_ahb_clk = {
+	.halt_reg = 0x17008,
+	.halt_check = BRANCH_HALT_VOTED,
+	.hwcg_reg = 0x17008,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x52008,
+		.enable_mask = BIT(7),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_qupv3_wrap_0_s_ahb_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_qupv3_wrap_1_m_ahb_clk = {
+	.halt_reg = 0x18004,
+	.halt_check = BRANCH_HALT_VOTED,
+	.clkr = {
+		.enable_reg = 0x52008,
+		.enable_mask = BIT(20),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_qupv3_wrap_1_m_ahb_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_qupv3_wrap_1_s_ahb_clk = {
+	.halt_reg = 0x18008,
+	.halt_check = BRANCH_HALT_VOTED,
+	.hwcg_reg = 0x18008,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x52008,
+		.enable_mask = BIT(21),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_qupv3_wrap_1_s_ahb_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_sdcc1_ahb_clk = {
+	.halt_reg = 0x26004,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x26004,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_sdcc1_ahb_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_sdcc1_apps_clk = {
+	.halt_reg = 0x26008,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x26008,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_sdcc1_apps_clk",
+			.parent_names = (const char *[]){
+				"gcc_sdcc1_apps_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_sdcc1_ice_core_clk = {
+	.halt_reg = 0x2603c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x2603c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_sdcc1_ice_core_clk",
+			.parent_names = (const char *[]){
+				"gcc_sdcc1_ice_core_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_sdcc2_ahb_clk = {
+	.halt_reg = 0x14008,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x14008,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_sdcc2_ahb_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_sdcc2_apps_clk = {
+	.halt_reg = 0x14004,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x14004,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_sdcc2_apps_clk",
+			.parent_names = (const char *[]){
+				"gcc_sdcc2_apps_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_sdcc4_ahb_clk = {
+	.halt_reg = 0x16008,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x16008,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_sdcc4_ahb_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_sdcc4_apps_clk = {
+	.halt_reg = 0x16004,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x16004,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_sdcc4_apps_clk",
+			.parent_names = (const char *[]){
+				"gcc_sdcc4_apps_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_sys_noc_cpuss_ahb_clk = {
+	.halt_reg = 0x48068,
+	.halt_check = BRANCH_HALT_VOTED,
+	.clkr = {
+		.enable_reg = 0x52000,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_sys_noc_cpuss_ahb_clk",
+			.parent_names = (const char *[]){
+				"gcc_cpuss_ahb_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_ufs_1x_clkref_clk = {
+	.halt_reg = 0x8c000,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x8c000,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_ufs_1x_clkref_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_ufs_phy_ahb_clk = {
+	.halt_reg = 0x77018,
+	.halt_check = BRANCH_HALT,
+	.hwcg_reg = 0x77018,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x77018,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_ufs_phy_ahb_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_ufs_phy_axi_clk = {
+	.halt_reg = 0x77010,
+	.halt_check = BRANCH_HALT,
+	.hwcg_reg = 0x77010,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x77010,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_ufs_phy_axi_clk",
+			.parent_names = (const char *[]){
+				"gcc_ufs_phy_axi_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_ufs_phy_ice_core_clk = {
+	.halt_reg = 0x77064,
+	.halt_check = BRANCH_HALT,
+	.hwcg_reg = 0x77064,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x77064,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_ufs_phy_ice_core_clk",
+			.parent_names = (const char *[]){
+				"gcc_ufs_phy_ice_core_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_ufs_phy_phy_aux_clk = {
+	.halt_reg = 0x7709c,
+	.halt_check = BRANCH_HALT,
+	.hwcg_reg = 0x7709c,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x7709c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_ufs_phy_phy_aux_clk",
+			.parent_names = (const char *[]){
+				"gcc_ufs_phy_phy_aux_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_ufs_phy_rx_symbol_0_clk = {
+	.halt_reg = 0x77020,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x77020,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_ufs_phy_rx_symbol_0_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_ufs_phy_rx_symbol_1_clk = {
+	.halt_reg = 0x770b8,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x770b8,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_ufs_phy_rx_symbol_1_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_ufs_phy_tx_symbol_0_clk = {
+	.halt_reg = 0x7701c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x7701c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_ufs_phy_tx_symbol_0_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_ufs_phy_unipro_core_clk = {
+	.halt_reg = 0x7705c,
+	.halt_check = BRANCH_HALT,
+	.hwcg_reg = 0x7705c,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0x7705c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_ufs_phy_unipro_core_clk",
+			.parent_names = (const char *[]){
+				"gcc_ufs_phy_unipro_core_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_usb30_prim_master_clk = {
+	.halt_reg = 0xf010,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xf010,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_usb30_prim_master_clk",
+			.parent_names = (const char *[]){
+				"gcc_usb30_prim_master_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_usb30_prim_mock_utmi_clk = {
+	.halt_reg = 0xf01c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xf01c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_usb30_prim_mock_utmi_clk",
+			.parent_names = (const char *[]){
+				"gcc_usb30_prim_mock_utmi_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_usb30_prim_sleep_clk = {
+	.halt_reg = 0xf018,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xf018,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_usb30_prim_sleep_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_usb3_prim_clkref_clk = {
+	.halt_reg = 0x8c010,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x8c010,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_usb3_prim_clkref_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_usb3_prim_phy_aux_clk = {
+	.halt_reg = 0xf054,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xf054,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_usb3_prim_phy_aux_clk",
+			.parent_names = (const char *[]){
+				"gcc_usb3_prim_phy_aux_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_usb3_prim_phy_com_aux_clk = {
+	.halt_reg = 0xf058,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xf058,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_usb3_prim_phy_com_aux_clk",
+			.parent_names = (const char *[]){
+				"gcc_usb3_prim_phy_aux_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_usb3_prim_phy_pipe_clk = {
+	.halt_reg = 0xf05c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xf05c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_usb3_prim_phy_pipe_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_video_ahb_clk = {
+	.halt_reg = 0xb004,
+	.halt_check = BRANCH_HALT,
+	.hwcg_reg = 0xb004,
+	.hwcg_bit = 1,
+	.clkr = {
+		.enable_reg = 0xb004,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_video_ahb_clk",
+			.flags = CLK_IS_CRITICAL,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_video_axi_clk = {
+	.halt_reg = 0xb080,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xb080,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_video_axi_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_video_throttle1_axi_clk = {
+	.halt_reg = 0xb084,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xb084,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_video_throttle1_axi_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_video_throttle_axi_clk = {
+	.halt_reg = 0xb024,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xb024,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_video_throttle_axi_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch gcc_video_xo_clk = {
+	.halt_reg = 0xb038,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xb038,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "gcc_video_xo_clk",
+			.flags = CLK_IS_CRITICAL,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_regmap *gcc_lito_clocks[] = {
+	[GCC_AGGRE_UFS_PHY_AXI_CLK] = &gcc_aggre_ufs_phy_axi_clk.clkr,
+	[GCC_AGGRE_USB3_PRIM_AXI_CLK] = &gcc_aggre_usb3_prim_axi_clk.clkr,
+	[GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
+	[GCC_CAMERA_AHB_CLK] = &gcc_camera_ahb_clk.clkr,
+	[GCC_CAMERA_HF_AXI_CLK] = &gcc_camera_hf_axi_clk.clkr,
+	[GCC_CAMERA_SF_AXI_CLK] = &gcc_camera_sf_axi_clk.clkr,
+	[GCC_CAMERA_THROTTLE_HF_AXI_CLK] = &gcc_camera_throttle_hf_axi_clk.clkr,
+	[GCC_CAMERA_THROTTLE_SF_AXI_CLK] = &gcc_camera_throttle_sf_axi_clk.clkr,
+	[GCC_CAMERA_XO_CLK] = &gcc_camera_xo_clk.clkr,
+	[GCC_CFG_NOC_USB3_PRIM_AXI_CLK] = &gcc_cfg_noc_usb3_prim_axi_clk.clkr,
+	[GCC_CPUSS_AHB_CLK] = &gcc_cpuss_ahb_clk.clkr,
+	[GCC_CPUSS_AHB_CLK_SRC] = &gcc_cpuss_ahb_clk_src.clkr,
+	[GCC_CPUSS_GNOC_CLK] = &gcc_cpuss_gnoc_clk.clkr,
+	[GCC_CPUSS_RBCPR_CLK] = &gcc_cpuss_rbcpr_clk.clkr,
+	[GCC_DDRSS_GPU_AXI_CLK] = &gcc_ddrss_gpu_axi_clk.clkr,
+	[GCC_DISP_AHB_CLK] = &gcc_disp_ahb_clk.clkr,
+	[GCC_DISP_GPLL0_CLK_SRC] = &gcc_disp_gpll0_clk_src.clkr,
+	[GCC_DISP_HF_AXI_CLK] = &gcc_disp_hf_axi_clk.clkr,
+	[GCC_DISP_SF_AXI_CLK] = &gcc_disp_sf_axi_clk.clkr,
+	[GCC_DISP_THROTTLE_HF_AXI_CLK] = &gcc_disp_throttle_hf_axi_clk.clkr,
+	[GCC_DISP_THROTTLE_SF_AXI_CLK] = &gcc_disp_throttle_sf_axi_clk.clkr,
+	[GCC_DISP_XO_CLK] = &gcc_disp_xo_clk.clkr,
+	[GCC_DPM_AHB_CLK] = &gcc_dpm_ahb_clk.clkr,
+	[GCC_DPM_CLK] = &gcc_dpm_clk.clkr,
+	[GCC_DPM_CLK_SRC] = &gcc_dpm_clk_src.clkr,
+	[GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
+	[GCC_GP1_CLK_SRC] = &gcc_gp1_clk_src.clkr,
+	[GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
+	[GCC_GP2_CLK_SRC] = &gcc_gp2_clk_src.clkr,
+	[GCC_GP3_CLK] = &gcc_gp3_clk.clkr,
+	[GCC_GP3_CLK_SRC] = &gcc_gp3_clk_src.clkr,
+	[GCC_GPU_CFG_AHB_CLK] = &gcc_gpu_cfg_ahb_clk.clkr,
+	[GCC_GPU_GPLL0_CLK_SRC] = &gcc_gpu_gpll0_clk_src.clkr,
+	[GCC_GPU_GPLL0_DIV_CLK_SRC] = &gcc_gpu_gpll0_div_clk_src.clkr,
+	[GCC_GPU_IREF_CLK] = &gcc_gpu_iref_clk.clkr,
+	[GCC_GPU_MEMNOC_GFX_CLK] = &gcc_gpu_memnoc_gfx_clk.clkr,
+	[GCC_GPU_SNOC_DVM_GFX_CLK] = &gcc_gpu_snoc_dvm_gfx_clk.clkr,
+	[GCC_NPU_AXI_CLK] = &gcc_npu_axi_clk.clkr,
+	[GCC_NPU_BWMON2_AXI_CLK] = &gcc_npu_bwmon2_axi_clk.clkr,
+	[GCC_NPU_BWMON_AXI_CLK] = &gcc_npu_bwmon_axi_clk.clkr,
+	[GCC_NPU_BWMON_CFG_AHB_CLK] = &gcc_npu_bwmon_cfg_ahb_clk.clkr,
+	[GCC_NPU_CFG_AHB_CLK] = &gcc_npu_cfg_ahb_clk.clkr,
+	[GCC_NPU_DMA_CLK] = &gcc_npu_dma_clk.clkr,
+	[GCC_NPU_DMA_CLK_SRC] = &gcc_npu_dma_clk_src.clkr,
+	[GCC_NPU_GPLL0_CLK_SRC] = &gcc_npu_gpll0_clk_src.clkr,
+	[GCC_NPU_GPLL0_DIV_CLK_SRC] = &gcc_npu_gpll0_div_clk_src.clkr,
+	[GCC_PDM2_CLK] = &gcc_pdm2_clk.clkr,
+	[GCC_PDM2_CLK_SRC] = &gcc_pdm2_clk_src.clkr,
+	[GCC_PDM_AHB_CLK] = &gcc_pdm_ahb_clk.clkr,
+	[GCC_PDM_XO4_CLK] = &gcc_pdm_xo4_clk.clkr,
+	[GCC_PRNG_AHB_CLK] = &gcc_prng_ahb_clk.clkr,
+	[GCC_QMIP_CAMERA_NRT_AHB_CLK] = &gcc_qmip_camera_nrt_ahb_clk.clkr,
+	[GCC_QMIP_CAMERA_RT_AHB_CLK] = &gcc_qmip_camera_rt_ahb_clk.clkr,
+	[GCC_QMIP_DISP_AHB_CLK] = &gcc_qmip_disp_ahb_clk.clkr,
+	[GCC_QMIP_RT_DISP_AHB_CLK] = &gcc_qmip_rt_disp_ahb_clk.clkr,
+	[GCC_QMIP_VIDEO_CVP_AHB_CLK] = &gcc_qmip_video_cvp_ahb_clk.clkr,
+	[GCC_QMIP_VIDEO_VCODEC_AHB_CLK] = &gcc_qmip_video_vcodec_ahb_clk.clkr,
+	[GCC_QUPV3_WRAP0_CORE_2X_CLK] = &gcc_qupv3_wrap0_core_2x_clk.clkr,
+	[GCC_QUPV3_WRAP0_CORE_CLK] = &gcc_qupv3_wrap0_core_clk.clkr,
+	[GCC_QUPV3_WRAP0_S0_CLK] = &gcc_qupv3_wrap0_s0_clk.clkr,
+	[GCC_QUPV3_WRAP0_S0_CLK_SRC] = &gcc_qupv3_wrap0_s0_clk_src.clkr,
+	[GCC_QUPV3_WRAP0_S1_CLK] = &gcc_qupv3_wrap0_s1_clk.clkr,
+	[GCC_QUPV3_WRAP0_S1_CLK_SRC] = &gcc_qupv3_wrap0_s1_clk_src.clkr,
+	[GCC_QUPV3_WRAP0_S2_CLK] = &gcc_qupv3_wrap0_s2_clk.clkr,
+	[GCC_QUPV3_WRAP0_S2_CLK_SRC] = &gcc_qupv3_wrap0_s2_clk_src.clkr,
+	[GCC_QUPV3_WRAP0_S3_CLK] = &gcc_qupv3_wrap0_s3_clk.clkr,
+	[GCC_QUPV3_WRAP0_S3_CLK_SRC] = &gcc_qupv3_wrap0_s3_clk_src.clkr,
+	[GCC_QUPV3_WRAP0_S4_CLK] = &gcc_qupv3_wrap0_s4_clk.clkr,
+	[GCC_QUPV3_WRAP0_S4_CLK_SRC] = &gcc_qupv3_wrap0_s4_clk_src.clkr,
+	[GCC_QUPV3_WRAP0_S5_CLK] = &gcc_qupv3_wrap0_s5_clk.clkr,
+	[GCC_QUPV3_WRAP0_S5_CLK_SRC] = &gcc_qupv3_wrap0_s5_clk_src.clkr,
+	[GCC_QUPV3_WRAP1_CORE_2X_CLK] = &gcc_qupv3_wrap1_core_2x_clk.clkr,
+	[GCC_QUPV3_WRAP1_CORE_CLK] = &gcc_qupv3_wrap1_core_clk.clkr,
+	[GCC_QUPV3_WRAP1_S0_CLK] = &gcc_qupv3_wrap1_s0_clk.clkr,
+	[GCC_QUPV3_WRAP1_S0_CLK_SRC] = &gcc_qupv3_wrap1_s0_clk_src.clkr,
+	[GCC_QUPV3_WRAP1_S1_CLK] = &gcc_qupv3_wrap1_s1_clk.clkr,
+	[GCC_QUPV3_WRAP1_S1_CLK_SRC] = &gcc_qupv3_wrap1_s1_clk_src.clkr,
+	[GCC_QUPV3_WRAP1_S2_CLK] = &gcc_qupv3_wrap1_s2_clk.clkr,
+	[GCC_QUPV3_WRAP1_S2_CLK_SRC] = &gcc_qupv3_wrap1_s2_clk_src.clkr,
+	[GCC_QUPV3_WRAP1_S3_CLK] = &gcc_qupv3_wrap1_s3_clk.clkr,
+	[GCC_QUPV3_WRAP1_S3_CLK_SRC] = &gcc_qupv3_wrap1_s3_clk_src.clkr,
+	[GCC_QUPV3_WRAP1_S4_CLK] = &gcc_qupv3_wrap1_s4_clk.clkr,
+	[GCC_QUPV3_WRAP1_S4_CLK_SRC] = &gcc_qupv3_wrap1_s4_clk_src.clkr,
+	[GCC_QUPV3_WRAP1_S5_CLK] = &gcc_qupv3_wrap1_s5_clk.clkr,
+	[GCC_QUPV3_WRAP1_S5_CLK_SRC] = &gcc_qupv3_wrap1_s5_clk_src.clkr,
+	[GCC_QUPV3_WRAP_0_M_AHB_CLK] = &gcc_qupv3_wrap_0_m_ahb_clk.clkr,
+	[GCC_QUPV3_WRAP_0_S_AHB_CLK] = &gcc_qupv3_wrap_0_s_ahb_clk.clkr,
+	[GCC_QUPV3_WRAP_1_M_AHB_CLK] = &gcc_qupv3_wrap_1_m_ahb_clk.clkr,
+	[GCC_QUPV3_WRAP_1_S_AHB_CLK] = &gcc_qupv3_wrap_1_s_ahb_clk.clkr,
+	[GCC_SDCC1_AHB_CLK] = &gcc_sdcc1_ahb_clk.clkr,
+	[GCC_SDCC1_APPS_CLK] = &gcc_sdcc1_apps_clk.clkr,
+	[GCC_SDCC1_APPS_CLK_SRC] = &gcc_sdcc1_apps_clk_src.clkr,
+	[GCC_SDCC1_ICE_CORE_CLK] = &gcc_sdcc1_ice_core_clk.clkr,
+	[GCC_SDCC1_ICE_CORE_CLK_SRC] = &gcc_sdcc1_ice_core_clk_src.clkr,
+	[GCC_SDCC2_AHB_CLK] = &gcc_sdcc2_ahb_clk.clkr,
+	[GCC_SDCC2_APPS_CLK] = &gcc_sdcc2_apps_clk.clkr,
+	[GCC_SDCC2_APPS_CLK_SRC] = &gcc_sdcc2_apps_clk_src.clkr,
+	[GCC_SDCC4_AHB_CLK] = &gcc_sdcc4_ahb_clk.clkr,
+	[GCC_SDCC4_APPS_CLK] = &gcc_sdcc4_apps_clk.clkr,
+	[GCC_SDCC4_APPS_CLK_SRC] = &gcc_sdcc4_apps_clk_src.clkr,
+	[GCC_SYS_NOC_CPUSS_AHB_CLK] = &gcc_sys_noc_cpuss_ahb_clk.clkr,
+	[GCC_UFS_1X_CLKREF_CLK] = &gcc_ufs_1x_clkref_clk.clkr,
+	[GCC_UFS_PHY_AHB_CLK] = &gcc_ufs_phy_ahb_clk.clkr,
+	[GCC_UFS_PHY_AXI_CLK] = &gcc_ufs_phy_axi_clk.clkr,
+	[GCC_UFS_PHY_AXI_CLK_SRC] = &gcc_ufs_phy_axi_clk_src.clkr,
+	[GCC_UFS_PHY_ICE_CORE_CLK] = &gcc_ufs_phy_ice_core_clk.clkr,
+	[GCC_UFS_PHY_ICE_CORE_CLK_SRC] = &gcc_ufs_phy_ice_core_clk_src.clkr,
+	[GCC_UFS_PHY_PHY_AUX_CLK] = &gcc_ufs_phy_phy_aux_clk.clkr,
+	[GCC_UFS_PHY_PHY_AUX_CLK_SRC] = &gcc_ufs_phy_phy_aux_clk_src.clkr,
+	[GCC_UFS_PHY_RX_SYMBOL_0_CLK] = &gcc_ufs_phy_rx_symbol_0_clk.clkr,
+	[GCC_UFS_PHY_RX_SYMBOL_1_CLK] = &gcc_ufs_phy_rx_symbol_1_clk.clkr,
+	[GCC_UFS_PHY_TX_SYMBOL_0_CLK] = &gcc_ufs_phy_tx_symbol_0_clk.clkr,
+	[GCC_UFS_PHY_UNIPRO_CORE_CLK] = &gcc_ufs_phy_unipro_core_clk.clkr,
+	[GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC] =
+		&gcc_ufs_phy_unipro_core_clk_src.clkr,
+	[GCC_USB30_PRIM_MASTER_CLK] = &gcc_usb30_prim_master_clk.clkr,
+	[GCC_USB30_PRIM_MASTER_CLK_SRC] = &gcc_usb30_prim_master_clk_src.clkr,
+	[GCC_USB30_PRIM_MOCK_UTMI_CLK] = &gcc_usb30_prim_mock_utmi_clk.clkr,
+	[GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC] =
+		&gcc_usb30_prim_mock_utmi_clk_src.clkr,
+	[GCC_USB30_PRIM_SLEEP_CLK] = &gcc_usb30_prim_sleep_clk.clkr,
+	[GCC_USB3_PRIM_CLKREF_CLK] = &gcc_usb3_prim_clkref_clk.clkr,
+	[GCC_USB3_PRIM_PHY_AUX_CLK] = &gcc_usb3_prim_phy_aux_clk.clkr,
+	[GCC_USB3_PRIM_PHY_AUX_CLK_SRC] = &gcc_usb3_prim_phy_aux_clk_src.clkr,
+	[GCC_USB3_PRIM_PHY_COM_AUX_CLK] = &gcc_usb3_prim_phy_com_aux_clk.clkr,
+	[GCC_USB3_PRIM_PHY_PIPE_CLK] = &gcc_usb3_prim_phy_pipe_clk.clkr,
+	[GCC_VIDEO_AHB_CLK] = &gcc_video_ahb_clk.clkr,
+	[GCC_VIDEO_AXI_CLK] = &gcc_video_axi_clk.clkr,
+	[GCC_VIDEO_THROTTLE1_AXI_CLK] = &gcc_video_throttle1_axi_clk.clkr,
+	[GCC_VIDEO_THROTTLE_AXI_CLK] = &gcc_video_throttle_axi_clk.clkr,
+	[GCC_VIDEO_XO_CLK] = &gcc_video_xo_clk.clkr,
+	[GPLL0] = &gpll0.clkr,
+	[GPLL0_OUT_EVEN] = &gpll0_out_even.clkr,
+	[GPLL6] = &gpll6.clkr,
+	[GPLL9] = &gpll9.clkr,
+};
+
+static const struct qcom_reset_map gcc_lito_resets[] = {
+	[GCC_DPM_BCR] = { 0x46000 },
+	[GCC_GPU_BCR] = { 0x71000 },
+	[GCC_MMSS_BCR] = { 0xb000 },
+	[GCC_NPU_BWMON_BCR] = { 0x70000 },
+	[GCC_NPU_BCR] = { 0x4d000 },
+	[GCC_PDM_BCR] = { 0x33000 },
+	[GCC_PRNG_BCR] = { 0x34000 },
+	[GCC_QUPV3_WRAPPER_0_BCR] = { 0x17000 },
+	[GCC_QUPV3_WRAPPER_1_BCR] = { 0x18000 },
+	[GCC_SDCC1_BCR] = { 0x26000 },
+	[GCC_SDCC2_BCR] = { 0x14000 },
+	[GCC_SDCC4_BCR] = { 0x16000 },
+	[GCC_UFS_PHY_BCR] = { 0x77000 },
+	[GCC_USB30_PRIM_BCR] = { 0xf000 },
+	[GCC_USB_PHY_CFG_AHB2PHY_BCR] = { 0x6a000 },
+};
+
+static const struct clk_rcg_dfs_data gcc_dfs_clocks[] = {
+	DEFINE_RCG_DFS(gcc_npu_dma_clk),
+	DEFINE_RCG_DFS(gcc_qupv3_wrap0_s0_clk),
+	DEFINE_RCG_DFS(gcc_qupv3_wrap0_s1_clk),
+	DEFINE_RCG_DFS(gcc_qupv3_wrap0_s2_clk),
+	DEFINE_RCG_DFS(gcc_qupv3_wrap0_s3_clk),
+	DEFINE_RCG_DFS(gcc_qupv3_wrap0_s4_clk),
+	DEFINE_RCG_DFS(gcc_qupv3_wrap0_s5_clk),
+	DEFINE_RCG_DFS(gcc_qupv3_wrap1_s0_clk),
+	DEFINE_RCG_DFS(gcc_qupv3_wrap1_s1_clk),
+	DEFINE_RCG_DFS(gcc_qupv3_wrap1_s2_clk),
+	DEFINE_RCG_DFS(gcc_qupv3_wrap1_s3_clk),
+	DEFINE_RCG_DFS(gcc_qupv3_wrap1_s4_clk),
+	DEFINE_RCG_DFS(gcc_qupv3_wrap1_s5_clk),
+};
+
+static const struct regmap_config gcc_lito_regmap_config = {
+	.reg_bits	= 32,
+	.reg_stride	= 4,
+	.val_bits	= 32,
+	.max_register	= 0x9c100,
+	.fast_io	= true,
+};
+
+static const struct qcom_cc_desc gcc_lito_desc = {
+	.config = &gcc_lito_regmap_config,
+	.clks = gcc_lito_clocks,
+	.num_clks = ARRAY_SIZE(gcc_lito_clocks),
+	.resets = gcc_lito_resets,
+	.num_resets = ARRAY_SIZE(gcc_lito_resets),
+};
+
+static const struct of_device_id gcc_lito_match_table[] = {
+	{ .compatible = "qcom,gcc-lito" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, gcc_lito_match_table);
+
+static int gcc_lito_probe(struct platform_device *pdev)
+{
+	struct regmap *regmap;
+	int ret;
+
+	regmap = qcom_cc_map(pdev, &gcc_lito_desc);
+	if (IS_ERR(regmap))
+		return PTR_ERR(regmap);
+
+	vdd_cx.regulator[0] = devm_regulator_get(&pdev->dev, "vdd_cx");
+	if (IS_ERR(vdd_cx.regulator[0])) {
+		if (PTR_ERR(vdd_cx.regulator[0]) != -EPROBE_DEFER)
+			dev_err(&pdev->dev,
+				"Unable to get vdd_cx regulator\n");
+		return PTR_ERR(vdd_cx.regulator[0]);
+	}
+
+	vdd_cx_ao.regulator[0] = devm_regulator_get(&pdev->dev, "vdd_cx_ao");
+	if (IS_ERR(vdd_cx_ao.regulator[0])) {
+		if (PTR_ERR(vdd_cx_ao.regulator[0]) != -EPROBE_DEFER)
+			dev_err(&pdev->dev,
+				"Unable to get vdd_cx_ao regulator\n");
+		return PTR_ERR(vdd_cx_ao.regulator[0]);
+	}
+
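+	/*
+	 * Register the QUP serial-engine RCGs for hardware-controlled
+	 * Dynamic Frequency Switching (DFS) before the clocks are used.
+	 */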
+	ret = qcom_cc_register_rcg_dfs(regmap, gcc_dfs_clocks,
+			ARRAY_SIZE(gcc_dfs_clocks));
+	if (ret)
+		return ret;
+
+	/* Disable the GPLL0 active input to NPU and GPU via MISC registers */
+	regmap_update_bits(regmap, GCC_NPU_MISC, 0x3, 0x3);
+	regmap_update_bits(regmap, GCC_GPU_MISC, 0x3, 0x3);
+
+	ret = qcom_cc_really_probe(pdev, &gcc_lito_desc, regmap);
+	if (ret) {
+		dev_err(&pdev->dev, "Failed to register GCC clocks\n");
+		return ret;
+	}
+
+	dev_info(&pdev->dev, "Registered GCC clocks\n");
+	return 0;
+}
+
+static struct platform_driver gcc_lito_driver = {
+	.probe	= gcc_lito_probe,
+	.driver	= {
+		.name = "gcc-lito",
+		.of_match_table = gcc_lito_match_table,
+	},
+};
+
+static int __init gcc_lito_init(void)
+{
+	return platform_driver_register(&gcc_lito_driver);
+}
+subsys_initcall(gcc_lito_init);
+
+static void __exit gcc_lito_exit(void)
+{
+	platform_driver_unregister(&gcc_lito_driver);
+}
+module_exit(gcc_lito_exit);
+
+MODULE_DESCRIPTION("QTI GCC LITO Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:gcc-lito");
diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig
index b94e4eb..991e11f 100644
--- a/drivers/devfreq/Kconfig
+++ b/drivers/devfreq/Kconfig
@@ -122,6 +122,24 @@
 
 comment "DEVFREQ Drivers"
 
+config DEVFREQ_GOV_QCOM_ADRENO_TZ
+	tristate "Qualcomm Technologies Inc Adreno Trustzone"
+	depends on QCOM_KGSL && QCOM_SCM
+	help
+	  Trustzone based governor for the Adreno GPU. Sets
+	  the frequency using an "on-demand" algorithm. This
+	  governor is unlikely to be useful for other
+	  devices.
+
+config DEVFREQ_GOV_QCOM_GPUBW_MON
+	tristate "GPU BW voting governor"
+	depends on DEVFREQ_GOV_QCOM_ADRENO_TZ
+	help
+	  This governor works together with the Adreno Trustzone governor
+	  and selects bus frequency votes using an "on-demand" algorithm.
+	  This governor is unlikely to be useful for non-Adreno
+	  devices.
+
 config ARM_EXYNOS_BUS_DEVFREQ
 	tristate "ARM EXYNOS Generic Memory Bus DEVFREQ Driver"
 	depends on ARCH_EXYNOS || COMPILE_TEST
diff --git a/drivers/devfreq/Makefile b/drivers/devfreq/Makefile
index 27a14bf..dc0768e 100644
--- a/drivers/devfreq/Makefile
+++ b/drivers/devfreq/Makefile
@@ -11,6 +11,8 @@
 obj-$(CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON)	+= governor_bw_hwmon.o
 obj-$(CONFIG_DEVFREQ_GOV_QCOM_CACHE_HWMON)	+= governor_cache_hwmon.o
 obj-$(CONFIG_DEVFREQ_GOV_MEMLAT)       += governor_memlat.o
+obj-$(CONFIG_DEVFREQ_GOV_QCOM_ADRENO_TZ) += governor_msm_adreno_tz.o
+obj-$(CONFIG_DEVFREQ_GOV_QCOM_GPUBW_MON) += governor_bw_vbif.o
 
 # DEVFREQ Drivers
 obj-$(CONFIG_ARM_EXYNOS_BUS_DEVFREQ)	+= exynos-bus.o
diff --git a/drivers/devfreq/devfreq_trace.h b/drivers/devfreq/devfreq_trace.h
new file mode 100644
index 0000000..8a20455
--- /dev/null
+++ b/drivers/devfreq/devfreq_trace.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2014-2019, The Linux Foundation. All rights reserved.
+ */
+
+#if !defined(_DEVFREQ_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _DEVFREQ_TRACE_H
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM devfreq
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE devfreq_trace
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(devfreq_msg,
+	TP_PROTO(const char *msg),
+	TP_ARGS(msg),
+	TP_STRUCT__entry(
+		__string(msg, msg)
+	),
+	TP_fast_assign(
+		__assign_str(msg, msg);
+	),
+	TP_printk(
+		"%s", __get_str(msg)
+	)
+);
+
+#endif /* _DEVFREQ_TRACE_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/drivers/devfreq/governor_bw_vbif.c b/drivers/devfreq/governor_bw_vbif.c
new file mode 100644
index 0000000..98bef4d
--- /dev/null
+++ b/drivers/devfreq/governor_bw_vbif.c
@@ -0,0 +1,133 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2014-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/devfreq.h>
+#include <linux/module.h>
+#include "governor.h"
+
+static unsigned long (*extern_get_bw)(void);
+static unsigned long *dev_ab;
+static unsigned long dev_ib;
+
+static DEFINE_MUTEX(df_lock);
+static struct devfreq *df;
+
+/*
+ * This function is the 'get_target_freq' API for the governor.
+ * It calls an external function, registered by the KGSL driver,
+ * to obtain and return a frequency value.
+ */
+static int devfreq_vbif_get_freq(struct devfreq *df,
+				unsigned long *freq)
+{
+	/* If the IB isn't set yet, check if it should be non-zero. */
+	if (!dev_ib && extern_get_bw) {
+		dev_ib = extern_get_bw();
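+		/* Seed the AB vote at a quarter of the IB vote by default. */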
+		if (dev_ab)
+			*dev_ab = dev_ib / 4;
+	}
+
+	*freq = dev_ib;
+	return 0;
+}
+
+/*
+ * Registers the function this legacy VBIF-based bus bandwidth
+ * governor uses to request a bandwidth value. Called by the
+ * KGSL driver.
+ */
+void devfreq_vbif_register_callback(void *p)
+{
+	extern_get_bw = p;
+}
+
+int devfreq_vbif_update_bw(unsigned long ib, unsigned long ab)
+{
+	int ret = 0;
+
+	mutex_lock(&df_lock);
+	if (df) {
+		mutex_lock(&df->lock);
+		dev_ib = ib;
+		if (dev_ab)
+			*dev_ab = ab;
+		ret = update_devfreq(df);
+		mutex_unlock(&df->lock);
+	}
+	mutex_unlock(&df_lock);
+	return ret;
+}
+
+static int devfreq_vbif_ev_handler(struct devfreq *devfreq,
+					unsigned int event, void *data)
+{
+	int ret;
+	struct devfreq_dev_status stat;
+
+	switch (event) {
+	case DEVFREQ_GOV_START:
+		mutex_lock(&df_lock);
+		df = devfreq;
+		if (df->profile->get_dev_status &&
+			!df->profile->get_dev_status(df->dev.parent, &stat) &&
+			stat.private_data)
+			dev_ab = stat.private_data;
+		else
+			pr_warn("Device doesn't take AB votes!\n");
+
+		mutex_unlock(&df_lock);
+
+		ret = devfreq_vbif_update_bw(0, 0);
+		if (ret) {
+			pr_err("Unable to update BW! Gov start failed!\n");
+			return ret;
+		}
+		/*
+		 * Normally at this point governors start the polling with
+		 * devfreq_monitor_start(df);
+		 * This governor doesn't poll, but expects external calls
+		 * to its devfreq_vbif_update_bw() function.
+		 */
+		pr_debug("Enabled MSM VBIF governor\n");
+		break;
+
+	case DEVFREQ_GOV_STOP:
+		mutex_lock(&df_lock);
+		df = NULL;
+		mutex_unlock(&df_lock);
+
+		pr_debug("Disabled MSM VBIF governor\n");
+		break;
+	}
+
+	return 0;
+}
+
+static struct devfreq_governor devfreq_vbif = {
+	.name = "bw_vbif",
+	.get_target_freq = devfreq_vbif_get_freq,
+	.event_handler = devfreq_vbif_ev_handler,
+};
+
+static int __init devfreq_vbif_init(void)
+{
+	return devfreq_add_governor(&devfreq_vbif);
+}
+subsys_initcall(devfreq_vbif_init);
+
+static void __exit devfreq_vbif_exit(void)
+{
+	int ret;
+
+	ret = devfreq_remove_governor(&devfreq_vbif);
+	if (ret)
+		pr_err("%s: failed remove governor %d\n", __func__, ret);
+
+}
+module_exit(devfreq_vbif_exit);
+
+MODULE_DESCRIPTION("VBIF based GPU bus BW voting governor");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/devfreq/governor_gpubw_mon.c b/drivers/devfreq/governor_gpubw_mon.c
new file mode 100644
index 0000000..e3087a7
--- /dev/null
+++ b/drivers/devfreq/governor_gpubw_mon.c
@@ -0,0 +1,266 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2014-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/devfreq.h>
+#include <linux/module.h>
+#include <linux/msm_adreno_devfreq.h>
+#include <linux/slab.h>
+
+#include "devfreq_trace.h"
+#include "governor.h"
+
+#define MIN_BUSY                1000
+#define LONG_FLOOR              50000
+#define HIST                    5
+#define TARGET                  80
+#define CAP                     75
+#define WAIT_THRESHOLD          10
+/* AB vote is in multiples of BW_STEP megabytes */
+#define BW_STEP                 160
+
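+/*
+ * Rescale the per-level up/down percentage thresholds into absolute
+ * normalized-cycle cutoffs against the new observed maximum.
+ */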
+static void _update_cutoff(struct devfreq_msm_adreno_tz_data *priv,
+					unsigned int norm_max)
+{
+	int i;
+
+	priv->bus.max = norm_max;
+	for (i = 0; i < priv->bus.num; i++) {
+		priv->bus.up[i] = priv->bus.p_up[i] * norm_max / 100;
+		priv->bus.down[i] = priv->bus.p_down[i] * norm_max / 100;
+	}
+}
+
+static inline int devfreq_get_freq_level(struct devfreq *devfreq,
+	unsigned long freq)
+{
+	int lev;
+
+	for (lev = 0; lev < devfreq->profile->max_state; lev++)
+		if (freq == devfreq->profile->freq_table[lev])
+			return lev;
+
+	return -EINVAL;
+}
+
+static int devfreq_gpubw_get_target(struct devfreq *df,
+				unsigned long *freq)
+{
+
+	struct devfreq_msm_adreno_tz_data *priv = df->data;
+	struct msm_busmon_extended_profile *bus_profile = container_of(
+					(df->profile),
+					struct msm_busmon_extended_profile,
+					profile);
+	struct devfreq_dev_status stats;
+	struct xstats b;
+	int result;
+	int level = 0;
+	int act_level;
+	int norm_max_cycles;
+	int norm_cycles;
+	int wait_active_percent;
+	int gpu_percent;
+	/*
+	 * Normalized AB should at max usage be the gpu_bimc frequency in MHz.
+	 * Start with a reasonable value and let the system push it up to max.
+	 */
+	static int norm_ab_max = 300;
+	int norm_ab;
+	unsigned long ab_mbytes = 0;
+
+	if (priv == NULL)
+		return 0;
+
+	stats.private_data = &b;
+
+	result = df->profile->get_dev_status(df->dev.parent, &stats);
+
+	*freq = stats.current_frequency;
+
+	priv->bus.total_time += stats.total_time;
+	priv->bus.gpu_time += stats.busy_time;
+	priv->bus.ram_time += b.ram_time;
+	priv->bus.ram_wait += b.ram_wait;
+
+	level = devfreq_get_freq_level(df, stats.current_frequency);
+
+	if (priv->bus.total_time < LONG_FLOOR)
+		return result;
+
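+	/*
+	 * Derive normalized RAM cycle counts for the sample window:
+	 * norm_max_cycles excludes wait time, norm_cycles includes it,
+	 * and the two percentages gate the up/down bus hints below.
+	 */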
+	norm_max_cycles = (unsigned int)(priv->bus.ram_time) /
+			(unsigned int) priv->bus.total_time;
+	norm_cycles = (unsigned int)(priv->bus.ram_time + priv->bus.ram_wait) /
+			(unsigned int) priv->bus.total_time;
+	wait_active_percent = (100 * (unsigned int)priv->bus.ram_wait) /
+			(unsigned int) priv->bus.ram_time;
+	gpu_percent = (100 * (unsigned int)priv->bus.gpu_time) /
+			(unsigned int) priv->bus.total_time;
+
+	/*
+	 * If there's a new high watermark, update the cutoffs and send the
+	 * FAST hint.  Otherwise check the current value against the current
+	 * cutoffs.
+	 */
+	if (norm_max_cycles > priv->bus.max) {
+		_update_cutoff(priv, norm_max_cycles);
+		bus_profile->flag = DEVFREQ_FLAG_FAST_HINT;
+	} else {
+		/* GPU votes for IB, not AB, so don't undervote the system */
+		norm_cycles = (100 * norm_cycles) / TARGET;
+		act_level = priv->bus.index[level] + b.mod;
+		act_level = (act_level < 0) ? 0 : act_level;
+		act_level = (act_level >= priv->bus.num) ?
+		(priv->bus.num - 1) : act_level;
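+		/*
+		 * Vote the bus up if RAM cycles exceed the cutoff, or the
+		 * wait ratio is high while the GPU itself is busy; vote
+		 * down only when below the down-cutoff and not already at
+		 * the lowest GPU level.
+		 */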
+		if ((norm_cycles > priv->bus.up[act_level] ||
+				wait_active_percent > WAIT_THRESHOLD) &&
+				gpu_percent > CAP)
+			bus_profile->flag = DEVFREQ_FLAG_FAST_HINT;
+		else if (norm_cycles < priv->bus.down[act_level] && level)
+			bus_profile->flag = DEVFREQ_FLAG_SLOW_HINT;
+	}
+
+	/* Calculate the AB vote based on bus width if defined */
+	if (priv->bus.width) {
+		norm_ab =  (unsigned int)priv->bus.ram_time /
+			(unsigned int) priv->bus.total_time;
+		/* Calculate AB in Mega Bytes and roundup in BW_STEP */
+		ab_mbytes = (norm_ab * priv->bus.width * 1000000ULL) >> 20;
+		bus_profile->ab_mbytes = roundup(ab_mbytes, BW_STEP);
+	} else if (bus_profile->flag) {
+		/* Re-calculate the AB percentage for a new IB vote */
+		norm_ab =  (unsigned int)priv->bus.ram_time /
+			(unsigned int) priv->bus.total_time;
+		if (norm_ab > norm_ab_max)
+			norm_ab_max = norm_ab;
+		bus_profile->percent_ab = (100 * norm_ab) / norm_ab_max;
+	}
+
+	priv->bus.total_time = 0;
+	priv->bus.gpu_time = 0;
+	priv->bus.ram_time = 0;
+	priv->bus.ram_wait = 0;
+
+	return result;
+}
+
+static int gpubw_start(struct devfreq *devfreq)
+{
+	struct devfreq_msm_adreno_tz_data *priv;
+
+	struct msm_busmon_extended_profile *bus_profile = container_of(
+					(devfreq->profile),
+					struct msm_busmon_extended_profile,
+					profile);
+	unsigned int t1, t2 = 2 * HIST;
+	int i, bus_size;
+
+	devfreq->data = bus_profile->private_data;
+	priv = devfreq->data;
+
+	bus_size = sizeof(u32) * priv->bus.num;
+	priv->bus.up = kzalloc(bus_size, GFP_KERNEL);
+	priv->bus.down = kzalloc(bus_size, GFP_KERNEL);
+	priv->bus.p_up = kzalloc(bus_size, GFP_KERNEL);
+	priv->bus.p_down = kzalloc(bus_size, GFP_KERNEL);
+	if (priv->bus.up == NULL || priv->bus.down == NULL ||
+		priv->bus.p_up == NULL || priv->bus.p_down == NULL)
+		return -ENOMEM;
+
+	/* Set up the cut-over percentages for the bus calculation. */
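+	/*
+	 * Each level's up-threshold sits HIST percent below its share of
+	 * the top IB vote, and its down-threshold sits 2 * HIST below the
+	 * previous level's share, leaving a hysteresis band between
+	 * adjacent levels.
+	 */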
+	for (i = 0; i < priv->bus.num; i++) {
+		t1 = (u32)(100 * priv->bus.ib[i]) /
+				(u32)priv->bus.ib[priv->bus.num - 1];
+		priv->bus.p_up[i] = t1 - HIST;
+		priv->bus.p_down[i] = t2 - 2 * HIST;
+		t2 = t1;
+	}
+	/* Set the upper-most and lower-most bounds correctly. */
+	priv->bus.p_down[0] = 0;
+	priv->bus.p_down[1] = (priv->bus.p_down[1] > (2 * HIST)) ?
+				priv->bus.p_down[1] : (2 * HIST);
+	if (priv->bus.num >= 1)
+		priv->bus.p_up[priv->bus.num - 1] = 100;
+	_update_cutoff(priv, priv->bus.max);
+
+	return 0;
+}
+
+static int gpubw_stop(struct devfreq *devfreq)
+{
+	struct devfreq_msm_adreno_tz_data *priv = devfreq->data;
+
+	if (priv) {
+		kfree(priv->bus.up);
+		kfree(priv->bus.down);
+		kfree(priv->bus.p_up);
+		kfree(priv->bus.p_down);
+	}
+	devfreq->data = NULL;
+	return 0;
+}
+
+static int devfreq_gpubw_event_handler(struct devfreq *devfreq,
+				unsigned int event, void *data)
+{
+	int result = 0;
+	unsigned long freq;
+
+	mutex_lock(&devfreq->lock);
+	freq = devfreq->previous_freq;
+	switch (event) {
+	case DEVFREQ_GOV_START:
+		result = gpubw_start(devfreq);
+		break;
+	case DEVFREQ_GOV_STOP:
+		result = gpubw_stop(devfreq);
+		break;
+	case DEVFREQ_GOV_RESUME:
+		/* TODO ..... */
+		/* ret = update_devfreq(devfreq); */
+		break;
+	case DEVFREQ_GOV_SUSPEND:
+		{
+			struct devfreq_msm_adreno_tz_data *priv = devfreq->data;
+
+			priv->bus.total_time = 0;
+			priv->bus.gpu_time = 0;
+			priv->bus.ram_time = 0;
+		}
+		break;
+	default:
+		result = 0;
+		break;
+	}
+	mutex_unlock(&devfreq->lock);
+	return result;
+}
+
+static struct devfreq_governor devfreq_gpubw = {
+	.name = "gpubw_mon",
+	.get_target_freq = devfreq_gpubw_get_target,
+	.event_handler = devfreq_gpubw_event_handler,
+};
+
+static int __init devfreq_gpubw_init(void)
+{
+	return devfreq_add_governor(&devfreq_gpubw);
+}
+subsys_initcall(devfreq_gpubw_init);
+
+static void __exit devfreq_gpubw_exit(void)
+{
+	int ret;
+
+	ret = devfreq_remove_governor(&devfreq_gpubw);
+	if (ret)
+		pr_err("%s: failed remove governor %d\n", __func__, ret);
+
+}
+module_exit(devfreq_gpubw_exit);
+
+MODULE_DESCRIPTION("GPU bus bandwidth voting driver. Uses VBIF counters");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/devfreq/governor_msm_adreno_tz.c b/drivers/devfreq/governor_msm_adreno_tz.c
new file mode 100644
index 0000000..f6cab36
--- /dev/null
+++ b/drivers/devfreq/governor_msm_adreno_tz.c
@@ -0,0 +1,654 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2010-2019, The Linux Foundation. All rights reserved.
+ */
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/devfreq.h>
+#include <linux/math64.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/ftrace.h>
+#include <linux/mm.h>
+#include <linux/msm_adreno_devfreq.h>
+#include <asm/cacheflush.h>
+#include <soc/qcom/scm.h>
+#include "governor.h"
+
+static DEFINE_SPINLOCK(tz_lock);
+static DEFINE_SPINLOCK(sample_lock);
+static DEFINE_SPINLOCK(suspend_lock);
+/*
+ * FLOOR is 5msec to capture up to 3 re-draws
+ * per frame for 60fps content.
+ */
+#define FLOOR		        5000
+/*
+ * MIN_BUSY is 1 msec for the sample to be sent
+ */
+#define MIN_BUSY		1000
+#define MAX_TZ_VERSION		0
+
+/*
+ * CEILING is 50msec, larger than any standard
+ * frame length, but less than the idle timer.
+ */
+#define CEILING			50000
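+/*
+ * SCM command IDs: legacy 32-bit calls, their 64-bit equivalents, and
+ * the v2 64-bit interface that adds the context-aware DCVS calls.
+ */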
+#define TZ_RESET_ID		0x3
+#define TZ_UPDATE_ID		0x4
+#define TZ_INIT_ID		0x6
+
+#define TZ_RESET_ID_64          0x7
+#define TZ_UPDATE_ID_64         0x8
+#define TZ_INIT_ID_64           0x9
+
+#define TZ_V2_UPDATE_ID_64         0xA
+#define TZ_V2_INIT_ID_64           0xB
+#define TZ_V2_INIT_CA_ID_64        0xC
+#define TZ_V2_UPDATE_WITH_CA_ID_64 0xD
+
+#define TAG "msm_adreno_tz: "
+
+static u64 suspend_time;
+static u64 suspend_start;
+static unsigned long acc_total, acc_relative_busy;
+
+static struct msm_adreno_extended_profile *partner_gpu_profile;
+static void do_partner_start_event(struct work_struct *work);
+static void do_partner_stop_event(struct work_struct *work);
+static void do_partner_suspend_event(struct work_struct *work);
+static void do_partner_resume_event(struct work_struct *work);
+
+static struct workqueue_struct *workqueue;
+
+/*
+ * Returns the GPU suspend time in milliseconds.
+ */
+u64 suspend_time_ms(void)
+{
+	u64 suspend_sampling_time;
+	u64 time_diff = 0;
+
+	if (suspend_start == 0)
+		return 0;
+
+	suspend_sampling_time = (u64)ktime_to_ms(ktime_get());
+	time_diff = suspend_sampling_time - suspend_start;
+	/* Update the suspend_start sample again */
+	suspend_start = suspend_sampling_time;
+	return time_diff;
+}
+
+static ssize_t gpu_load_show(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	unsigned long sysfs_busy_perc = 0;
+	/*
+	 * Average out the samples taken since the last read.
+	 * This keeps the average value in sync with the
+	 * client's sampling duration.
+	 */
+	spin_lock(&sample_lock);
+	if (acc_total)
+		sysfs_busy_perc = (acc_relative_busy * 100) / acc_total;
+
+	/* Reset the parameters */
+	acc_total = 0;
+	acc_relative_busy = 0;
+	spin_unlock(&sample_lock);
+	return snprintf(buf, PAGE_SIZE, "%lu\n", sysfs_busy_perc);
+}
+
+/*
+ * Returns the time in ms for which the GPU was in the suspend
+ * state since the last time the entry was read.
+ */
+static ssize_t suspend_time_show(struct device *dev,
+	struct device_attribute *attr,
+	char *buf)
+{
+	u64 time_diff = 0;
+
+	spin_lock(&suspend_lock);
+	time_diff = suspend_time_ms();
+	/*
+	 * Add the previously accumulated suspend time as well, since
+	 * the GPU may enter and leave suspend between reads; the value
+	 * returned is the total suspend time since the last read.
+	 */
+	time_diff += suspend_time;
+	suspend_time = 0;
+	spin_unlock(&suspend_lock);
+
+	return snprintf(buf, PAGE_SIZE, "%llu\n", time_diff);
+}
+
+static DEVICE_ATTR_RO(gpu_load);
+
+static DEVICE_ATTR_RO(suspend_time);
+
+static const struct device_attribute *adreno_tz_attr_list[] = {
+		&dev_attr_gpu_load,
+		&dev_attr_suspend_time,
+		NULL
+};
+
+void compute_work_load(struct devfreq_dev_status *stats,
+		struct devfreq_msm_adreno_tz_data *priv,
+		struct devfreq *devfreq)
+{
+	spin_lock(&sample_lock);
+	/*
+	 * Keep accumulating the stats until the client reads them;
+	 * averaging over all samples and resetting are done when
+	 * the sysfs entry is read.
+	 */
+	acc_total += stats->total_time;
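+	/*
+	 * Weight busy time by the ratio of the current frequency to the
+	 * maximum (freq_table[0] is the highest level here), so the load
+	 * reported is normalized across frequency changes.
+	 */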
+	acc_relative_busy += (stats->busy_time * stats->current_frequency) /
+				devfreq->profile->freq_table[0];
+	spin_unlock(&sample_lock);
+}
+
+/* Trap into TrustZone and call functions there. */
+static int __secure_tz_reset_entry2(unsigned int *scm_data, u32 size_scm_data,
+					bool is_64)
+{
+	int ret;
+	/* sync memory before sending the commands to tz */
+	__iowmb();
+
+	if (!is_64) {
+		struct scm_desc desc = {
+			.args[0] = scm_data[0],
+			.args[1] = scm_data[1],
+			.arginfo = SCM_ARGS(2),
+		};
+		spin_lock(&tz_lock);
+		ret = scm_call2_atomic(SCM_SIP_FNID(SCM_SVC_IO, TZ_RESET_ID),
+			&desc);
+		spin_unlock(&tz_lock);
+	} else {
+		struct scm_desc desc = {0};
+
+		desc.arginfo = 0;
+		ret = scm_call2(SCM_SIP_FNID(SCM_SVC_DCVS,
+				 TZ_RESET_ID_64), &desc);
+	}
+	return ret;
+}
+
+static int __secure_tz_update_entry3(unsigned int *scm_data, u32 size_scm_data,
+		int *val, u32 size_val, struct devfreq_msm_adreno_tz_data *priv)
+{
+	int ret;
+	/* sync memory before sending the commands to tz */
+	__iowmb();
+
+	if (!priv->is_64) {
+		struct scm_desc desc = {
+			.args[0] = scm_data[0],
+			.args[1] = scm_data[1],
+			.args[2] = scm_data[2],
+			.arginfo = SCM_ARGS(3),
+		};
+		spin_lock(&tz_lock);
+		ret = scm_call2_atomic(SCM_SIP_FNID(SCM_SVC_IO, TZ_UPDATE_ID),
+			&desc);
+		spin_unlock(&tz_lock);
+		*val = ret;
+	} else {
+		unsigned int cmd_id;
+		struct scm_desc desc = {0};
+
+		desc.args[0] = scm_data[0];
+		desc.args[1] = scm_data[1];
+		desc.args[2] = scm_data[2];
+
+		if (!priv->ctxt_aware_enable) {
+			desc.arginfo = SCM_ARGS(3);
+			cmd_id =  TZ_V2_UPDATE_ID_64;
+		} else {
+			/* Add context count information to the update */
+			desc.args[3] = scm_data[3];
+			desc.arginfo = SCM_ARGS(4);
+			cmd_id =  TZ_V2_UPDATE_WITH_CA_ID_64;
+		}
+		ret = scm_call2(SCM_SIP_FNID(SCM_SVC_DCVS, cmd_id),
+				&desc);
+		*val = desc.ret[0];
+	}
+	return ret;
+}
+
+static int tz_init_ca(struct devfreq_msm_adreno_tz_data *priv)
+{
+	unsigned int tz_ca_data[2];
+	struct scm_desc desc = {0};
+	u8 *tz_buf;
+	int ret;
+
+	/* Set data for TZ */
+	tz_ca_data[0] = priv->bin.ctxt_aware_target_pwrlevel;
+	tz_ca_data[1] = priv->bin.ctxt_aware_busy_penalty;
+
+	tz_buf = kzalloc(PAGE_ALIGN(sizeof(tz_ca_data)), GFP_KERNEL);
+	if (!tz_buf)
+		return -ENOMEM;
+
+	memcpy(tz_buf, tz_ca_data, sizeof(tz_ca_data));
+	/* Ensure memcpy completes execution */
+	mb();
+	dmac_flush_range(tz_buf,
+		tz_buf + PAGE_ALIGN(sizeof(tz_ca_data)));
+
+	desc.args[0] = virt_to_phys(tz_buf);
+	desc.args[1] = sizeof(tz_ca_data);
+	desc.arginfo = SCM_ARGS(2, SCM_RW, SCM_VAL);
+
+	ret = scm_call2(SCM_SIP_FNID(SCM_SVC_DCVS,
+			TZ_V2_INIT_CA_ID_64),
+			&desc);
+
+	kzfree(tz_buf);
+
+	return ret;
+}
+
+static int tz_init(struct devfreq_msm_adreno_tz_data *priv,
+			unsigned int *tz_pwrlevels, u32 size_pwrlevels,
+			unsigned int *version, u32 size_version)
+{
+	int ret;
+	/* Make sure all CMD IDs are available */
+	if (scm_is_call_available(SCM_SVC_DCVS, TZ_INIT_ID_64) &&
+			scm_is_call_available(SCM_SVC_DCVS, TZ_UPDATE_ID_64) &&
+			scm_is_call_available(SCM_SVC_DCVS, TZ_RESET_ID_64)) {
+		struct scm_desc desc = {0};
+		u8 *tz_buf;
+
+		tz_buf = kzalloc(PAGE_ALIGN(size_pwrlevels), GFP_KERNEL);
+		if (!tz_buf)
+			return -ENOMEM;
+		memcpy(tz_buf, tz_pwrlevels, size_pwrlevels);
+		/* Ensure memcpy completes execution */
+		mb();
+		dmac_flush_range(tz_buf, tz_buf + PAGE_ALIGN(size_pwrlevels));
+
+		desc.args[0] = virt_to_phys(tz_buf);
+		desc.args[1] = size_pwrlevels;
+		desc.arginfo = SCM_ARGS(2, SCM_RW, SCM_VAL);
+
+		ret = scm_call2(SCM_SIP_FNID(SCM_SVC_DCVS, TZ_V2_INIT_ID_64),
+				&desc);
+		*version = desc.ret[0];
+		if (!ret)
+			priv->is_64 = true;
+		kzfree(tz_buf);
+	} else {
+		ret = -EINVAL;
+	}
+
+	/* Initialize the context-aware feature, if enabled. */
+	if (!ret && priv->ctxt_aware_enable) {
+		if (priv->is_64 &&
+			(scm_is_call_available(SCM_SVC_DCVS,
+				TZ_V2_INIT_CA_ID_64)) &&
+			(scm_is_call_available(SCM_SVC_DCVS,
+				TZ_V2_UPDATE_WITH_CA_ID_64))) {
+			ret = tz_init_ca(priv);
+			/*
+			 * If context aware feature initialization fails,
+			 * just print an error message and return
+			 * success as normal DCVS will still work.
+			 */
+			if (ret) {
+				pr_err(TAG "tz: context aware DCVS init failed\n");
+				priv->ctxt_aware_enable = false;
+				return 0;
+			}
+		} else {
+			pr_warn(TAG "tz: context aware DCVS not supported\n");
+			priv->ctxt_aware_enable = false;
+		}
+	}
+
+	return ret;
+}
+
+static inline int devfreq_get_freq_level(struct devfreq *devfreq,
+	unsigned long freq)
+{
+	int lev;
+
+	for (lev = 0; lev < devfreq->profile->max_state; lev++)
+		if (freq == devfreq->profile->freq_table[lev])
+			return lev;
+
+	return -EINVAL;
+}
+
+static int tz_get_target_freq(struct devfreq *devfreq, unsigned long *freq)
+{
+	int result = 0;
+	struct devfreq_msm_adreno_tz_data *priv = devfreq->data;
+	struct devfreq_dev_status stats;
+	int val, level = 0;
+	unsigned int scm_data[4];
+	int context_count = 0;
+
+	/* NULL unless the device's get_dev_status() fills it in */
+	stats.private_data = NULL;
+	result = devfreq->profile->get_dev_status(devfreq->dev.parent, &stats);
+	if (result) {
+		pr_err(TAG "get_status failed %d\n", result);
+		return result;
+	}
+
+	*freq = stats.current_frequency;
+	priv->bin.total_time += stats.total_time;
+	priv->bin.busy_time += stats.busy_time;
+
+	if (stats.private_data)
+		context_count =  *((int *)stats.private_data);
+
+	/* Update the GPU load statistics */
+	compute_work_load(&stats, priv, devfreq);
+	/*
+	 * Do not waste CPU cycles running this algorithm if
+	 * the GPU just started, if less than FLOOR time has passed
+	 * since the last run, or if the GPU has not been busier
+	 * than MIN_BUSY.
+	 */
+	if ((stats.total_time == 0) ||
+		(priv->bin.total_time < FLOOR) ||
+		(unsigned int) priv->bin.busy_time < MIN_BUSY) {
+		return 0;
+	}
+
+	level = devfreq_get_freq_level(devfreq, stats.current_frequency);
+	if (level < 0) {
+		pr_err(TAG "bad freq %ld\n", stats.current_frequency);
+		return level;
+	}
+
+	/*
+	 * If there is an extended block of busy processing,
+	 * increase frequency.  Otherwise run the normal algorithm.
+	 */
+	if (!priv->disable_busy_time_burst &&
+			priv->bin.busy_time > CEILING) {
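+		/* Jump straight to the top: level + val works out to 0. */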
+		val = -1 * level;
+	} else {
+
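+		/*
+		 * Hand the sample window to the TZ DCVS algorithm; val
+		 * returns a signed level delta (negative means faster).
+		 */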
+		scm_data[0] = level;
+		scm_data[1] = priv->bin.total_time;
+		scm_data[2] = priv->bin.busy_time;
+		scm_data[3] = context_count;
+		__secure_tz_update_entry3(scm_data, sizeof(scm_data),
+					&val, sizeof(val), priv);
+	}
+	priv->bin.total_time = 0;
+	priv->bin.busy_time = 0;
+
+	/*
+	 * If the decision is to move to a different level, make sure the GPU
+	 * frequency changes.
+	 */
+	if (val) {
+		level += val;
+		level = max(level, 0);
+		level = min_t(int, level, devfreq->profile->max_state - 1);
+	}
+
+	*freq = devfreq->profile->freq_table[level];
+	return 0;
+}
+
+static int tz_notify(struct notifier_block *nb, unsigned long type, void *devp)
+{
+	int result = 0;
+	struct devfreq *devfreq = devp;
+
+	switch (type) {
+	case ADRENO_DEVFREQ_NOTIFY_IDLE:
+	case ADRENO_DEVFREQ_NOTIFY_RETIRE:
+		mutex_lock(&devfreq->lock);
+		result = update_devfreq(devfreq);
+		mutex_unlock(&devfreq->lock);
+		/* Notifying the partner bus governor, if any */
+		if (partner_gpu_profile && partner_gpu_profile->bus_devfreq) {
+			mutex_lock(&partner_gpu_profile->bus_devfreq->lock);
+			update_devfreq(partner_gpu_profile->bus_devfreq);
+			mutex_unlock(&partner_gpu_profile->bus_devfreq->lock);
+		}
+		break;
+	/* ignored by this governor */
+	case ADRENO_DEVFREQ_NOTIFY_SUBMIT:
+	default:
+		break;
+	}
+	return notifier_from_errno(result);
+}
+
+static int tz_start(struct devfreq *devfreq)
+{
+	struct devfreq_msm_adreno_tz_data *priv;
+	unsigned int tz_pwrlevels[MSM_ADRENO_MAX_PWRLEVELS + 1];
+	int i, out, ret;
+	unsigned int version;
+
+	struct msm_adreno_extended_profile *gpu_profile = container_of(
+					(devfreq->profile),
+					struct msm_adreno_extended_profile,
+					profile);
+
+	/*
+	 * Assuming that we have only one instance of the adreno device
+	 * connected to this governor, we can safely restore the pointer
+	 * to the governor private data from the container of the device
+	 * profile.
+	 */
+	devfreq->data = gpu_profile->private_data;
+	partner_gpu_profile = gpu_profile;
+
+	priv = devfreq->data;
+	priv->nb.notifier_call = tz_notify;
+
+	out = 1;
+	if (devfreq->profile->max_state < MSM_ADRENO_MAX_PWRLEVELS) {
+		for (i = 0; i < devfreq->profile->max_state; i++)
+			tz_pwrlevels[out++] = devfreq->profile->freq_table[i];
+		tz_pwrlevels[0] = i;
+	} else {
+		pr_err(TAG "tz_pwrlevels[] is too short\n");
+		return -EINVAL;
+	}
+
+	INIT_WORK(&gpu_profile->partner_start_event_ws,
+					do_partner_start_event);
+	INIT_WORK(&gpu_profile->partner_stop_event_ws,
+					do_partner_stop_event);
+	INIT_WORK(&gpu_profile->partner_suspend_event_ws,
+					do_partner_suspend_event);
+	INIT_WORK(&gpu_profile->partner_resume_event_ws,
+					do_partner_resume_event);
+
+	ret = tz_init(priv, tz_pwrlevels, sizeof(tz_pwrlevels), &version,
+				sizeof(version));
+	if (ret != 0 || version > MAX_TZ_VERSION) {
+		pr_err(TAG "tz_init failed\n");
+		return ret;
+	}
+
+	for (i = 0; adreno_tz_attr_list[i] != NULL; i++)
+		device_create_file(&devfreq->dev, adreno_tz_attr_list[i]);
+
+	return kgsl_devfreq_add_notifier(devfreq->dev.parent, &priv->nb);
+}
+
+static int tz_stop(struct devfreq *devfreq)
+{
+	int i;
+	struct devfreq_msm_adreno_tz_data *priv = devfreq->data;
+
+	kgsl_devfreq_del_notifier(devfreq->dev.parent, &priv->nb);
+
+	for (i = 0; adreno_tz_attr_list[i] != NULL; i++)
+		device_remove_file(&devfreq->dev, adreno_tz_attr_list[i]);
+
+	flush_workqueue(workqueue);
+
+	/* leaving the governor and cleaning the pointer to private data */
+	devfreq->data = NULL;
+	partner_gpu_profile = NULL;
+	return 0;
+}
+
+static int tz_suspend(struct devfreq *devfreq)
+{
+	struct devfreq_msm_adreno_tz_data *priv = devfreq->data;
+	unsigned int scm_data[2] = {0, 0};
+
+	__secure_tz_reset_entry2(scm_data, sizeof(scm_data), priv->is_64);
+
+	priv->bin.total_time = 0;
+	priv->bin.busy_time = 0;
+	return 0;
+}
+
+static int tz_handler(struct devfreq *devfreq, unsigned int event, void *data)
+{
+	int result;
+
+	struct msm_adreno_extended_profile *gpu_profile = container_of(
+					(devfreq->profile),
+					struct msm_adreno_extended_profile,
+					profile);
+
+	switch (event) {
+	case DEVFREQ_GOV_START:
+		result = tz_start(devfreq);
+		break;
+
+	case DEVFREQ_GOV_STOP:
+		/* Queue the stop work before the TZ is stopped */
+		if (partner_gpu_profile && partner_gpu_profile->bus_devfreq)
+			queue_work(workqueue,
+				&gpu_profile->partner_stop_event_ws);
+		spin_lock(&suspend_lock);
+		suspend_start = 0;
+		spin_unlock(&suspend_lock);
+		result = tz_stop(devfreq);
+		break;
+
+	case DEVFREQ_GOV_SUSPEND:
+		result = tz_suspend(devfreq);
+		if (!result) {
+			spin_lock(&suspend_lock);
+			/* Collect the start sample for suspend time */
+			suspend_start = (u64)ktime_to_ms(ktime_get());
+			spin_unlock(&suspend_lock);
+		}
+		break;
+
+	case DEVFREQ_GOV_RESUME:
+		spin_lock(&suspend_lock);
+		suspend_time += suspend_time_ms();
+		/* Reset the suspend_start when gpu resumes */
+		suspend_start = 0;
+		spin_unlock(&suspend_lock);
+		/* fallthrough */
+	case DEVFREQ_GOV_INTERVAL:
+		/* fallthrough, this governor doesn't use polling */
+	default:
+		result = 0;
+		break;
+	}
+
+	if (partner_gpu_profile && partner_gpu_profile->bus_devfreq)
+		switch (event) {
+		case DEVFREQ_GOV_START:
+			queue_work(workqueue,
+					&gpu_profile->partner_start_event_ws);
+			break;
+		case DEVFREQ_GOV_SUSPEND:
+			queue_work(workqueue,
+					&gpu_profile->partner_suspend_event_ws);
+			break;
+		case DEVFREQ_GOV_RESUME:
+			queue_work(workqueue,
+					&gpu_profile->partner_resume_event_ws);
+			break;
+		}
+
+	return result;
+}
+
+static void _do_partner_event(struct work_struct *work, unsigned int event)
+{
+	struct devfreq *bus_devfreq;
+
+	if (partner_gpu_profile == NULL)
+		return;
+
+	bus_devfreq = partner_gpu_profile->bus_devfreq;
+
+	if (bus_devfreq != NULL &&
+		bus_devfreq->governor &&
+		bus_devfreq->governor->event_handler)
+		bus_devfreq->governor->event_handler(bus_devfreq, event, NULL);
+}
+
+static void do_partner_start_event(struct work_struct *work)
+{
+	_do_partner_event(work, DEVFREQ_GOV_START);
+}
+
+static void do_partner_stop_event(struct work_struct *work)
+{
+	_do_partner_event(work, DEVFREQ_GOV_STOP);
+}
+
+static void do_partner_suspend_event(struct work_struct *work)
+{
+	_do_partner_event(work, DEVFREQ_GOV_SUSPEND);
+}
+
+static void do_partner_resume_event(struct work_struct *work)
+{
+	_do_partner_event(work, DEVFREQ_GOV_RESUME);
+}
+
+static struct devfreq_governor msm_adreno_tz = {
+	.name = "msm-adreno-tz",
+	.get_target_freq = tz_get_target_freq,
+	.event_handler = tz_handler,
+};
+
+static int __init msm_adreno_tz_init(void)
+{
+	workqueue = create_freezable_workqueue("governor_msm_adreno_tz_wq");
+
+	if (workqueue == NULL)
+		return -ENOMEM;
+
+	return devfreq_add_governor(&msm_adreno_tz);
+}
+subsys_initcall(msm_adreno_tz_init);
+
+static void __exit msm_adreno_tz_exit(void)
+{
+	int ret = devfreq_remove_governor(&msm_adreno_tz);
+
+	if (ret)
+		pr_err(TAG "failed to remove governor %d\n", ret);
+
+	if (workqueue != NULL)
+		destroy_workqueue(workqueue);
+}
+
+module_exit(msm_adreno_tz_exit);
+
+MODULE_LICENSE("GPL v2");
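
For reference, the frequency-step logic in tz_get_target_freq() above reduces to a signed level delta clamped to the bounds of the frequency table (negative deltas move toward faster levels, since lower indices are higher frequencies). A minimal standalone sketch, using a hypothetical frequency table rather than any real power-level data:

#include <stdio.h>

/* Sketch of the tz_get_target_freq() level adjustment: 'val' is a
 * signed level delta returned by the TZ algorithm (or -level for a
 * busy burst). The table below is hypothetical.
 */
static const unsigned long freq_table[] = { 710000000, 510000000,
					    390000000, 257000000 };
#define MAX_STATE ((int)(sizeof(freq_table) / sizeof(freq_table[0])))

static unsigned long adjust_level(int level, int val)
{
	if (val) {
		level += val;
		if (level < 0)			/* clamp at the fastest level */
			level = 0;
		if (level > MAX_STATE - 1)	/* clamp at the slowest level */
			level = MAX_STATE - 1;
	}
	return freq_table[level];
}

int main(void)
{
	/* A busy-time burst returns val = -level, pinning the top frequency. */
	printf("%lu\n", adjust_level(2, -2));	/* -> 710000000 */
	printf("%lu\n", adjust_level(1, 5));	/* clamped -> 257000000 */
	return 0;
}
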
diff --git a/drivers/esoc/esoc-mdm-pon.c b/drivers/esoc/esoc-mdm-pon.c
index 07cde76..abde8c7 100644
--- a/drivers/esoc/esoc-mdm-pon.c
+++ b/drivers/esoc/esoc-mdm-pon.c
@@ -1,9 +1,10 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2014-2015, 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2015, 2017-2019, The Linux Foundation. All rights reserved.
  */
 
 #include "esoc-mdm.h"
+#include <linux/input/qpnp-power-on.h>
 
 /* This function can be called from atomic context. */
 static int mdm9x55_toggle_soft_reset(struct mdm_ctrl *mdm, bool atomic)
@@ -73,9 +74,23 @@ static int sdx50m_toggle_soft_reset(struct mdm_ctrl *mdm, bool atomic)
 /* This function can be called from atomic context. */
 static int sdx55m_toggle_soft_reset(struct mdm_ctrl *mdm, bool atomic)
 {
+	struct device_node *node = mdm->dev->of_node;
+	int rc;
 	int soft_reset_direction_assert = 0,
 	    soft_reset_direction_de_assert = 1;
 
+	if (of_property_read_bool(node, "qcom,esoc-spmi-soft-reset")) {
+		esoc_mdm_log("Doing a Warm reset using SPMI\n");
+		rc = qpnp_pon_modem_pwr_off(PON_POWER_OFF_WARM_RESET);
+		if (rc) {
+			dev_err(mdm->dev, "SPMI warm reset failed\n");
+			esoc_mdm_log("SPMI warm reset failed\n");
+			return rc;
+		}
+		esoc_mdm_log("Warm reset done using SPMI\n");
+		return 0;
+	}
+
 	if (mdm->soft_reset_inverted) {
 		soft_reset_direction_assert = 1;
 		soft_reset_direction_de_assert = 0;
diff --git a/drivers/fsi/Kconfig b/drivers/fsi/Kconfig
index af3a20d..99c99a5 100644
--- a/drivers/fsi/Kconfig
+++ b/drivers/fsi/Kconfig
@@ -46,6 +46,7 @@
 	tristate "FSI master based on Aspeed ColdFire coprocessor"
 	depends on GPIOLIB
 	depends on GPIO_ASPEED
+	select GENERIC_ALLOCATOR
 	---help---
 	This option enables a FSI master using the AST2400 and AST2500 GPIO
 	lines driven by the internal ColdFire coprocessor. This requires
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index ef00d14..325e221 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -2243,12 +2243,13 @@ static void gfx_v9_0_rlc_start(struct amdgpu_device *adev)
 #endif
 
 	WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 1);
+	udelay(50);
 
 	/* carrizo do enable cp interrupt after cp inited */
-	if (!(adev->flags & AMD_IS_APU))
+	if (!(adev->flags & AMD_IS_APU)) {
 		gfx_v9_0_enable_gui_idle_interrupt(adev, true);
-
-	udelay(50);
+		udelay(50);
+	}
 
 #ifdef AMDGPU_RLC_DEBUG_RETRY
 	/* RLC_GPM_GENERAL_6 : RLC Ucode version */
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index 7c6ac3c..8bb355d 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -973,9 +973,21 @@ static int get_clock(void *i2c_priv)
 {
 	struct ast_i2c_chan *i2c = i2c_priv;
 	struct ast_private *ast = i2c->dev->dev_private;
-	uint32_t val;
+	uint32_t val, val2, count, pass;
 
-	val = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x10) >> 4;
+	count = 0;
+	pass = 0;
+	val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x10) >> 4) & 0x01;
+	do {
+		val2 = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x10) >> 4) & 0x01;
+		if (val == val2) {
+			pass++;
+		} else {
+			pass = 0;
+			val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x10) >> 4) & 0x01;
+		}
+	} while ((pass < 5) && (count++ < 0x10000));
+
 	return val & 1 ? 1 : 0;
 }
 
@@ -983,9 +995,21 @@ static int get_data(void *i2c_priv)
 {
 	struct ast_i2c_chan *i2c = i2c_priv;
 	struct ast_private *ast = i2c->dev->dev_private;
-	uint32_t val;
+	uint32_t val, val2, count, pass;
 
-	val = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x20) >> 5;
+	count = 0;
+	pass = 0;
+	val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x20) >> 5) & 0x01;
+	do {
+		val2 = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x20) >> 5) & 0x01;
+		if (val == val2) {
+			pass++;
+		} else {
+			pass = 0;
+			val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x20) >> 5) & 0x01;
+		}
+	} while ((pass < 5) && (count++ < 0x10000));
+
 	return val & 1 ? 1 : 0;
 }
 
@@ -998,7 +1022,7 @@ static void set_clock(void *i2c_priv, int clock)
 
 	for (i = 0; i < 0x10000; i++) {
 		ujcrb7 = ((clock & 0x01) ? 0 : 1);
-		ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0xfe, ujcrb7);
+		ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0xf4, ujcrb7);
 		jtemp = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x01);
 		if (ujcrb7 == jtemp)
 			break;
@@ -1014,7 +1038,7 @@ static void set_data(void *i2c_priv, int data)
 
 	for (i = 0; i < 0x10000; i++) {
 		ujcrb7 = ((data & 0x01) ? 0 : 1) << 2;
-		ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0xfb, ujcrb7);
+		ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0xf1, ujcrb7);
 		jtemp = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x04);
 		if (ujcrb7 == jtemp)
 			break;
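
The get_clock()/get_data() hunks above replace a single register read with a debounce: keep sampling the line until five consecutive reads agree, or a retry budget runs out. A minimal standalone sketch of the same pattern, where read_bit() is a hypothetical stand-in for the driver's ast_get_index_reg_mask() read plus shift/mask:

#include <stdint.h>
#include <stdio.h>

static uint32_t read_bit(void)
{
	static int n;

	/* Simulate a line that glitches for the first few samples. */
	return (n++ < 3) ? (n & 1) : 1;
}

static int debounced_read(void)
{
	uint32_t val = read_bit() & 0x01;
	uint32_t count = 0, pass = 0;

	do {
		uint32_t val2 = read_bit() & 0x01;

		if (val == val2) {
			pass++;		/* one more agreeing sample */
		} else {
			pass = 0;	/* mismatch: restart the run */
			val = read_bit() & 0x01;
		}
	} while ((pass < 5) && (count++ < 0x10000));

	return val & 1 ? 1 : 0;
}

int main(void)
{
	printf("line = %d\n", debounced_read());	/* settles to 1 */
	return 0;
}
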
diff --git a/drivers/gpu/drm/meson/meson_venc.c b/drivers/gpu/drm/meson/meson_venc.c
index 14aac66..7a3a6ed 100644
--- a/drivers/gpu/drm/meson/meson_venc.c
+++ b/drivers/gpu/drm/meson/meson_venc.c
@@ -715,6 +715,7 @@ struct meson_hdmi_venc_vic_mode {
 	{ 5, &meson_hdmi_encp_mode_1080i60 },
 	{ 20, &meson_hdmi_encp_mode_1080i50 },
 	{ 32, &meson_hdmi_encp_mode_1080p24 },
+	{ 33, &meson_hdmi_encp_mode_1080p50 },
 	{ 34, &meson_hdmi_encp_mode_1080p30 },
 	{ 31, &meson_hdmi_encp_mode_1080p50 },
 	{ 16, &meson_hdmi_encp_mode_1080p60 },
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
index 14ee9085..8dc6c3d 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
@@ -197,7 +197,8 @@ static void sde_encoder_phys_cmd_pp_tx_done_irq(void *arg, int irq_idx)
 	SDE_ATRACE_BEGIN("pp_done_irq");
 
 	/* notify all synchronous clients first, then asynchronous clients */
-	if (phys_enc->parent_ops.handle_frame_done)
+	if (phys_enc->parent_ops.handle_frame_done &&
+	    atomic_read(&phys_enc->pending_kickoff_cnt))
 		phys_enc->parent_ops.handle_frame_done(phys_enc->parent,
 				phys_enc, event);
 
diff --git a/drivers/hwmon/ina2xx.c b/drivers/hwmon/ina2xx.c
index 71d3445..07ee195 100644
--- a/drivers/hwmon/ina2xx.c
+++ b/drivers/hwmon/ina2xx.c
@@ -274,7 +274,7 @@ static int ina2xx_get_value(struct ina2xx_data *data, u8 reg,
 		break;
 	case INA2XX_CURRENT:
 		/* signed register, result in mA */
-		val = regval * data->current_lsb_uA;
+		val = (s16)regval * data->current_lsb_uA;
 		val = DIV_ROUND_CLOSEST(val, 1000);
 		break;
 	case INA2XX_CALIBRATION:
@@ -491,7 +491,7 @@ static int ina2xx_probe(struct i2c_client *client,
 	}
 
 	data->groups[group++] = &ina2xx_group;
-	if (id->driver_data == ina226)
+	if (chip == ina226)
 		data->groups[group++] = &ina226_group;
 
 	hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
@@ -500,7 +500,7 @@ static int ina2xx_probe(struct i2c_client *client,
 		return PTR_ERR(hwmon_dev);
 
 	dev_info(dev, "power monitor %s (Rshunt = %li uOhm)\n",
-		 id->name, data->rshunt);
+		 client->name, data->rshunt);
 
 	return 0;
 }
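
The ina2xx fix above hinges on sign extension: the current register holds a signed 16-bit two's-complement value carried in an unsigned regval, so it must be cast to s16 before scaling. A small standalone illustration (the 1000 uA LSB is hypothetical):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t regval = 0xFFFF;	/* -1 LSB on the wire */
	long current_lsb_uA = 1000;

	long wrong = (long)regval * current_lsb_uA;		/* 65535000 */
	long right = (long)(int16_t)regval * current_lsb_uA;	/* -1000 */

	printf("wrong=%ld right=%ld\n", wrong, right);
	return 0;
}
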
diff --git a/drivers/hwmon/mlxreg-fan.c b/drivers/hwmon/mlxreg-fan.c
index de46577..d8fa4be 100644
--- a/drivers/hwmon/mlxreg-fan.c
+++ b/drivers/hwmon/mlxreg-fan.c
@@ -51,7 +51,7 @@
  */
 #define MLXREG_FAN_GET_RPM(rval, d, s)	(DIV_ROUND_CLOSEST(15000000 * 100, \
 					 ((rval) + (s)) * (d)))
-#define MLXREG_FAN_GET_FAULT(val, mask) (!!((val) ^ (mask)))
+#define MLXREG_FAN_GET_FAULT(val, mask) (!((val) ^ (mask)))
 #define MLXREG_FAN_PWM_DUTY2STATE(duty)	(DIV_ROUND_CLOSEST((duty) *	\
 					 MLXREG_FAN_MAX_STATE,		\
 					 MLXREG_FAN_MAX_DUTY))
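
The mlxreg-fan change above inverts the fault test: a tachometer fault is reported only when the sampled bits equal the mask, i.e. when the XOR of the two is zero. A quick standalone check of the corrected macro:

#include <assert.h>

#define GET_FAULT(val, mask)	(!((val) ^ (mask)))

int main(void)
{
	assert(GET_FAULT(0xFF, 0xFF) == 1);	/* all mask bits set: fault */
	assert(GET_FAULT(0x7F, 0xFF) == 0);	/* fan spinning: no fault */
	return 0;
}
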
diff --git a/drivers/hwmon/raspberrypi-hwmon.c b/drivers/hwmon/raspberrypi-hwmon.c
index be5ba46..0d04572 100644
--- a/drivers/hwmon/raspberrypi-hwmon.c
+++ b/drivers/hwmon/raspberrypi-hwmon.c
@@ -115,7 +115,6 @@ static int rpi_hwmon_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
 	struct rpi_hwmon_data *data;
-	int ret;
 
 	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
 	if (!data)
@@ -124,11 +123,6 @@ static int rpi_hwmon_probe(struct platform_device *pdev)
 	/* Parent driver assure that firmware is correct */
 	data->fw = dev_get_drvdata(dev->parent);
 
-	/* Init throttled */
-	ret = rpi_firmware_property(data->fw, RPI_FIRMWARE_GET_THROTTLED,
-				    &data->last_throttled,
-				    sizeof(data->last_throttled));
-
 	data->hwmon_dev = devm_hwmon_device_register_with_info(dev, "rpi_volt",
 							       data,
 							       &rpi_chip_info,
diff --git a/drivers/hwmon/w83795.c b/drivers/hwmon/w83795.c
index 49276bb..1bb80f9 100644
--- a/drivers/hwmon/w83795.c
+++ b/drivers/hwmon/w83795.c
@@ -1691,7 +1691,7 @@ store_sf_setup(struct device *dev, struct device_attribute *attr,
  * somewhere else in the code
  */
 #define SENSOR_ATTR_TEMP(index) {					\
-	SENSOR_ATTR_2(temp##index##_type, S_IRUGO | (index < 4 ? S_IWUSR : 0), \
+	SENSOR_ATTR_2(temp##index##_type, S_IRUGO | (index < 5 ? S_IWUSR : 0), \
 		show_temp_mode, store_temp_mode, NOT_USED, index - 1),	\
 	SENSOR_ATTR_2(temp##index##_input, S_IRUGO, show_temp,		\
 		NULL, TEMP_READ, index - 1),				\
diff --git a/drivers/hwtracing/coresight/Kconfig b/drivers/hwtracing/coresight/Kconfig
index 9965ecb..c72b210 100644
--- a/drivers/hwtracing/coresight/Kconfig
+++ b/drivers/hwtracing/coresight/Kconfig
@@ -197,6 +197,7 @@
 config CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE
 	int "default enable ETM for Remote processor based on instance id"
 	depends on CORESIGHT_REMOTE_ETM
+	default 0
 	help
 	  Support for enabling separated Remote processor ETM tracing. Depends
 	  on if instance id bit is set.
diff --git a/drivers/infiniband/core/roce_gid_mgmt.c b/drivers/infiniband/core/roce_gid_mgmt.c
index ee36619..25d43c8 100644
--- a/drivers/infiniband/core/roce_gid_mgmt.c
+++ b/drivers/infiniband/core/roce_gid_mgmt.c
@@ -767,8 +767,10 @@ static int netdevice_event(struct notifier_block *this, unsigned long event,
 
 	case NETDEV_CHANGEADDR:
 		cmds[0] = netdev_del_cmd;
-		cmds[1] = add_default_gid_cmd;
-		cmds[2] = add_cmd;
+		if (ndev->reg_state == NETREG_REGISTERED) {
+			cmds[1] = add_default_gid_cmd;
+			cmds[2] = add_cmd;
+		}
 		break;
 
 	case NETDEV_CHANGEUPPER:
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index 85cd1a3..22bd978 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -1252,6 +1252,7 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
 	/* Registered a new RoCE device instance to netdev */
 	rc = bnxt_re_register_netdev(rdev);
 	if (rc) {
+		rtnl_unlock();
 		pr_err("Failed to register with netedev: %#x\n", rc);
 		return -EINVAL;
 	}
@@ -1461,6 +1462,7 @@ static void bnxt_re_task(struct work_struct *work)
 				"Failed to register with IB: %#x", rc);
 			bnxt_re_remove_one(rdev);
 			bnxt_re_dev_unreg(rdev);
+			goto exit;
 		}
 		break;
 	case NETDEV_UP:
@@ -1484,6 +1486,7 @@ static void bnxt_re_task(struct work_struct *work)
 	}
 	smp_mb__before_atomic();
 	atomic_dec(&rdev->sched_count);
+exit:
 	kfree(re_work);
 }
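
The bnxt_re hunks above close two error paths: dropping the rtnl lock before the early return, and jumping past the tail bookkeeping once the device may have been torn down. A minimal sketch of the goto-exit pattern, with illustrative types rather than the real driver's:

#include <stdlib.h>

struct dev_ctx { int sched_count; };

static void teardown(struct dev_ctx *rdev)
{
	free(rdev);		/* rdev is invalid after this point */
}

static void task(struct dev_ctx *rdev, void *work, int rc)
{
	if (rc) {
		teardown(rdev);
		goto exit;	/* skip the dereference below */
	}
	rdev->sched_count--;	/* only safe while rdev is still live */
exit:
	free(work);		/* the work item is always released */
}

int main(void)
{
	struct dev_ctx *rdev = calloc(1, sizeof(*rdev));
	void *work = malloc(16);

	task(rdev, work, -1);	/* error path: rdev and work both freed */
	return 0;
}
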
 
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index e1668bc..902d12d 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -12485,7 +12485,8 @@ static int init_cntrs(struct hfi1_devdata *dd)
 	}
 
 	/* allocate space for the counter values */
-	dd->cntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
+	dd->cntrs = kcalloc(dd->ndevcntrs + num_driver_cntrs, sizeof(u64),
+			    GFP_KERNEL);
 	if (!dd->cntrs)
 		goto bail;
 
diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h
index d947031..cfd2523 100644
--- a/drivers/infiniband/hw/hfi1/hfi.h
+++ b/drivers/infiniband/hw/hfi1/hfi.h
@@ -154,6 +154,8 @@ struct hfi1_ib_stats {
 extern struct hfi1_ib_stats hfi1_stats;
 extern const struct pci_error_handlers hfi1_pci_err_handler;
 
+extern int num_driver_cntrs;
+
 /*
  * First-cut criterion for "device is active" is
  * two thousand dwords combined Tx, Rx traffic per
diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
index a7c586a..3dfb4cf 100644
--- a/drivers/infiniband/hw/hfi1/verbs.c
+++ b/drivers/infiniband/hw/hfi1/verbs.c
@@ -1701,7 +1701,7 @@ static const char * const driver_cntr_names[] = {
 static DEFINE_MUTEX(cntr_names_lock); /* protects the *_cntr_names bufers */
 static const char **dev_cntr_names;
 static const char **port_cntr_names;
-static int num_driver_cntrs = ARRAY_SIZE(driver_cntr_names);
+int num_driver_cntrs = ARRAY_SIZE(driver_cntr_names);
 static int num_dev_cntrs;
 static int num_port_cntrs;
 static int cntr_names_initialized;
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index 0218c0f..a442b29 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -1661,10 +1661,9 @@ static int hns_roce_v2_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port,
 	return hns_roce_cmq_send(hr_dev, &desc, 1);
 }
 
-static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
-				  unsigned long mtpt_idx)
+static int set_mtpt_pbl(struct hns_roce_v2_mpt_entry *mpt_entry,
+			struct hns_roce_mr *mr)
 {
-	struct hns_roce_v2_mpt_entry *mpt_entry;
 	struct scatterlist *sg;
 	u64 page_addr;
 	u64 *pages;
@@ -1672,6 +1671,53 @@ static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
 	int len;
 	int entry;
 
+	mpt_entry->pbl_size = cpu_to_le32(mr->pbl_size);
+	mpt_entry->pbl_ba_l = cpu_to_le32(lower_32_bits(mr->pbl_ba >> 3));
+	roce_set_field(mpt_entry->byte_48_mode_ba,
+		       V2_MPT_BYTE_48_PBL_BA_H_M, V2_MPT_BYTE_48_PBL_BA_H_S,
+		       upper_32_bits(mr->pbl_ba >> 3));
+
+	pages = (u64 *)__get_free_page(GFP_KERNEL);
+	if (!pages)
+		return -ENOMEM;
+
+	i = 0;
+	for_each_sg(mr->umem->sg_head.sgl, sg, mr->umem->nmap, entry) {
+		len = sg_dma_len(sg) >> PAGE_SHIFT;
+		for (j = 0; j < len; ++j) {
+			page_addr = sg_dma_address(sg) +
+				(j << mr->umem->page_shift);
+			pages[i] = page_addr >> 6;
+			/* Record the first 2 entries directly to MTPT table */
+			if (i >= HNS_ROCE_V2_MAX_INNER_MTPT_NUM - 1)
+				goto found;
+			i++;
+		}
+	}
+found:
+	mpt_entry->pa0_l = cpu_to_le32(lower_32_bits(pages[0]));
+	roce_set_field(mpt_entry->byte_56_pa0_h, V2_MPT_BYTE_56_PA0_H_M,
+		       V2_MPT_BYTE_56_PA0_H_S, upper_32_bits(pages[0]));
+
+	mpt_entry->pa1_l = cpu_to_le32(lower_32_bits(pages[1]));
+	roce_set_field(mpt_entry->byte_64_buf_pa1, V2_MPT_BYTE_64_PA1_H_M,
+		       V2_MPT_BYTE_64_PA1_H_S, upper_32_bits(pages[1]));
+	roce_set_field(mpt_entry->byte_64_buf_pa1,
+		       V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
+		       V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S,
+		       mr->pbl_buf_pg_sz + PG_SHIFT_OFFSET);
+
+	free_page((unsigned long)pages);
+
+	return 0;
+}
+
+static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
+				  unsigned long mtpt_idx)
+{
+	struct hns_roce_v2_mpt_entry *mpt_entry;
+	int ret;
+
 	mpt_entry = mb_buf;
 	memset(mpt_entry, 0, sizeof(*mpt_entry));
 
@@ -1686,7 +1732,6 @@ static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
 		       mr->pbl_ba_pg_sz + PG_SHIFT_OFFSET);
 	roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
 		       V2_MPT_BYTE_4_PD_S, mr->pd);
-	mpt_entry->byte_4_pd_hop_st = cpu_to_le32(mpt_entry->byte_4_pd_hop_st);
 
 	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RA_EN_S, 0);
 	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1);
@@ -1700,13 +1745,11 @@ static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
 		     (mr->access & IB_ACCESS_REMOTE_WRITE ? 1 : 0));
 	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S,
 		     (mr->access & IB_ACCESS_LOCAL_WRITE ? 1 : 0));
-	mpt_entry->byte_8_mw_cnt_en = cpu_to_le32(mpt_entry->byte_8_mw_cnt_en);
 
 	roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S,
 		     mr->type == MR_TYPE_MR ? 0 : 1);
 	roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_INNER_PA_VLD_S,
 		     1);
-	mpt_entry->byte_12_mw_pa = cpu_to_le32(mpt_entry->byte_12_mw_pa);
 
 	mpt_entry->len_l = cpu_to_le32(lower_32_bits(mr->size));
 	mpt_entry->len_h = cpu_to_le32(upper_32_bits(mr->size));
@@ -1717,53 +1760,9 @@ static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
 	if (mr->type == MR_TYPE_DMA)
 		return 0;
 
-	mpt_entry->pbl_size = cpu_to_le32(mr->pbl_size);
+	ret = set_mtpt_pbl(mpt_entry, mr);
 
-	mpt_entry->pbl_ba_l = cpu_to_le32(lower_32_bits(mr->pbl_ba >> 3));
-	roce_set_field(mpt_entry->byte_48_mode_ba, V2_MPT_BYTE_48_PBL_BA_H_M,
-		       V2_MPT_BYTE_48_PBL_BA_H_S,
-		       upper_32_bits(mr->pbl_ba >> 3));
-	mpt_entry->byte_48_mode_ba = cpu_to_le32(mpt_entry->byte_48_mode_ba);
-
-	pages = (u64 *)__get_free_page(GFP_KERNEL);
-	if (!pages)
-		return -ENOMEM;
-
-	i = 0;
-	for_each_sg(mr->umem->sg_head.sgl, sg, mr->umem->nmap, entry) {
-		len = sg_dma_len(sg) >> PAGE_SHIFT;
-		for (j = 0; j < len; ++j) {
-			page_addr = sg_dma_address(sg) +
-				    (j << mr->umem->page_shift);
-			pages[i] = page_addr >> 6;
-
-			/* Record the first 2 entry directly to MTPT table */
-			if (i >= HNS_ROCE_V2_MAX_INNER_MTPT_NUM - 1)
-				goto found;
-			i++;
-		}
-	}
-
-found:
-	mpt_entry->pa0_l = cpu_to_le32(lower_32_bits(pages[0]));
-	roce_set_field(mpt_entry->byte_56_pa0_h, V2_MPT_BYTE_56_PA0_H_M,
-		       V2_MPT_BYTE_56_PA0_H_S,
-		       upper_32_bits(pages[0]));
-	mpt_entry->byte_56_pa0_h = cpu_to_le32(mpt_entry->byte_56_pa0_h);
-
-	mpt_entry->pa1_l = cpu_to_le32(lower_32_bits(pages[1]));
-	roce_set_field(mpt_entry->byte_64_buf_pa1, V2_MPT_BYTE_64_PA1_H_M,
-		       V2_MPT_BYTE_64_PA1_H_S, upper_32_bits(pages[1]));
-
-	free_page((unsigned long)pages);
-
-	roce_set_field(mpt_entry->byte_64_buf_pa1,
-		       V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
-		       V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S,
-		       mr->pbl_buf_pg_sz + PG_SHIFT_OFFSET);
-	mpt_entry->byte_64_buf_pa1 = cpu_to_le32(mpt_entry->byte_64_buf_pa1);
-
-	return 0;
+	return ret;
 }
 
 static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev,
@@ -1772,6 +1771,7 @@ static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev,
 					u64 size, void *mb_buf)
 {
 	struct hns_roce_v2_mpt_entry *mpt_entry = mb_buf;
+	int ret = 0;
 
 	if (flags & IB_MR_REREG_PD) {
 		roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
@@ -1784,14 +1784,14 @@ static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev,
 			     V2_MPT_BYTE_8_BIND_EN_S,
 			     (mr_access_flags & IB_ACCESS_MW_BIND ? 1 : 0));
 		roce_set_bit(mpt_entry->byte_8_mw_cnt_en,
-			   V2_MPT_BYTE_8_ATOMIC_EN_S,
-			   (mr_access_flags & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0));
+			     V2_MPT_BYTE_8_ATOMIC_EN_S,
+			     mr_access_flags & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0);
 		roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RR_EN_S,
-			     (mr_access_flags & IB_ACCESS_REMOTE_READ ? 1 : 0));
+			     mr_access_flags & IB_ACCESS_REMOTE_READ ? 1 : 0);
 		roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RW_EN_S,
-			    (mr_access_flags & IB_ACCESS_REMOTE_WRITE ? 1 : 0));
+			     mr_access_flags & IB_ACCESS_REMOTE_WRITE ? 1 : 0);
 		roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S,
-			     (mr_access_flags & IB_ACCESS_LOCAL_WRITE ? 1 : 0));
+			     mr_access_flags & IB_ACCESS_LOCAL_WRITE ? 1 : 0);
 	}
 
 	if (flags & IB_MR_REREG_TRANS) {
@@ -1800,21 +1800,13 @@ static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev,
 		mpt_entry->len_l = cpu_to_le32(lower_32_bits(size));
 		mpt_entry->len_h = cpu_to_le32(upper_32_bits(size));
 
-		mpt_entry->pbl_size = cpu_to_le32(mr->pbl_size);
-		mpt_entry->pbl_ba_l =
-				cpu_to_le32(lower_32_bits(mr->pbl_ba >> 3));
-		roce_set_field(mpt_entry->byte_48_mode_ba,
-			       V2_MPT_BYTE_48_PBL_BA_H_M,
-			       V2_MPT_BYTE_48_PBL_BA_H_S,
-			       upper_32_bits(mr->pbl_ba >> 3));
-		mpt_entry->byte_48_mode_ba =
-				cpu_to_le32(mpt_entry->byte_48_mode_ba);
-
 		mr->iova = iova;
 		mr->size = size;
+
+		ret = set_mtpt_pbl(mpt_entry, mr);
 	}
 
-	return 0;
+	return ret;
 }
 
 static void *get_cqe_v2(struct hns_roce_cq *hr_cq, int n)
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index d216e0d..9e1cac8 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -724,6 +724,7 @@ static int pagefault_single_data_segment(struct mlx5_ib_dev *dev,
 			head = frame;
 
 			bcnt -= frame->bcnt;
+			offset = 0;
 		}
 		break;
 
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index d53d954..183fe5c 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -4413,17 +4413,18 @@ static int _mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
 			goto out;
 		}
 
-		if (wr->opcode == IB_WR_LOCAL_INV ||
-		    wr->opcode == IB_WR_REG_MR) {
+		if (wr->opcode == IB_WR_REG_MR) {
 			fence = dev->umr_fence;
 			next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
-		} else if (wr->send_flags & IB_SEND_FENCE) {
-			if (qp->next_fence)
-				fence = MLX5_FENCE_MODE_SMALL_AND_FENCE;
-			else
-				fence = MLX5_FENCE_MODE_FENCE;
-		} else {
-			fence = qp->next_fence;
+		} else {
+			if (wr->send_flags & IB_SEND_FENCE) {
+				if (qp->next_fence)
+					fence = MLX5_FENCE_MODE_SMALL_AND_FENCE;
+				else
+					fence = MLX5_FENCE_MODE_FENCE;
+			} else {
+				fence = qp->next_fence;
+			}
 		}
 
 		switch (ibqp->qp_type) {
diff --git a/drivers/infiniband/sw/rdmavt/ah.c b/drivers/infiniband/sw/rdmavt/ah.c
index 89ec0f6..084bb4b 100644
--- a/drivers/infiniband/sw/rdmavt/ah.c
+++ b/drivers/infiniband/sw/rdmavt/ah.c
@@ -91,13 +91,15 @@ EXPORT_SYMBOL(rvt_check_ah);
  * rvt_create_ah - create an address handle
  * @pd: the protection domain
  * @ah_attr: the attributes of the AH
+ * @udata: pointer to user's input/output buffer information.
  *
  * This may be called from interrupt context.
  *
  * Return: newly allocated ah
  */
 struct ib_ah *rvt_create_ah(struct ib_pd *pd,
-			    struct rdma_ah_attr *ah_attr)
+			    struct rdma_ah_attr *ah_attr,
+			    struct ib_udata *udata)
 {
 	struct rvt_ah *ah;
 	struct rvt_dev_info *dev = ib_to_rvt(pd->device);
diff --git a/drivers/infiniband/sw/rdmavt/ah.h b/drivers/infiniband/sw/rdmavt/ah.h
index 16105af..25271b4 100644
--- a/drivers/infiniband/sw/rdmavt/ah.h
+++ b/drivers/infiniband/sw/rdmavt/ah.h
@@ -51,7 +51,8 @@
 #include <rdma/rdma_vt.h>
 
 struct ib_ah *rvt_create_ah(struct ib_pd *pd,
-			    struct rdma_ah_attr *ah_attr);
+			    struct rdma_ah_attr *ah_attr,
+			    struct ib_udata *udata);
 int rvt_destroy_ah(struct ib_ah *ibah);
 int rvt_modify_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
 int rvt_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
diff --git a/drivers/media/common/videobuf2/videobuf2-v4l2.c b/drivers/media/common/videobuf2/videobuf2-v4l2.c
index 886a2d8..f9456a53 100644
--- a/drivers/media/common/videobuf2/videobuf2-v4l2.c
+++ b/drivers/media/common/videobuf2/videobuf2-v4l2.c
@@ -328,6 +328,8 @@ static int __fill_vb2_buffer(struct vb2_buffer *vb,
 					b->m.planes[plane].m.userptr;
 				planes[plane].length =
 					b->m.planes[plane].length;
+				planes[plane].data_offset =
+					b->m.planes[plane].data_offset;
 			}
 		}
 		if (b->memory == VB2_MEMORY_DMABUF) {
@@ -336,6 +338,8 @@ static int __fill_vb2_buffer(struct vb2_buffer *vb,
 					b->m.planes[plane].m.fd;
 				planes[plane].length =
 					b->m.planes[plane].length;
+				planes[plane].data_offset =
+					b->m.planes[plane].data_offset;
 			}
 		}
 
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
index e66cd9b..54e0200 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
@@ -4236,8 +4236,8 @@ static int cam_ife_hw_mgr_get_err_type(
 	void                              *handler_priv,
 	void                              *payload)
 {
-	struct cam_isp_resource_node         *hw_res_l = NULL;
-	struct cam_isp_resource_node         *hw_res_r = NULL;
+	struct cam_isp_resource_node         *hw_res_left = NULL;
+	struct cam_isp_resource_node         *hw_res_right = NULL;
 	struct cam_ife_hw_mgr_ctx            *ife_hwr_mgr_ctx;
 	struct cam_vfe_top_irq_evt_payload   *evt_payload;
 	struct cam_ife_hw_mgr_res            *isp_ife_camif_res = NULL;
@@ -4263,40 +4263,41 @@ static int cam_ife_hw_mgr_get_err_type(
 			(isp_ife_camif_res->res_id != CAM_ISP_HW_VFE_IN_CAMIF))
 			continue;
 
-		hw_res_l = isp_ife_camif_res->hw_res[CAM_ISP_HW_SPLIT_LEFT];
-		hw_res_r = isp_ife_camif_res->hw_res[CAM_ISP_HW_SPLIT_RIGHT];
+		hw_res_left = isp_ife_camif_res->hw_res[CAM_ISP_HW_SPLIT_LEFT];
+		hw_res_right =
+			isp_ife_camif_res->hw_res[CAM_ISP_HW_SPLIT_RIGHT];
 
-		CAM_DBG(CAM_ISP, "is_dual_vfe ? = %d\n",
+		CAM_DBG(CAM_ISP, "is_dual_vfe ? = %d",
 			isp_ife_camif_res->is_dual_vfe);
 
 		/* ERROR check for Left VFE */
-		if (!hw_res_l) {
+		if (!hw_res_left) {
 			CAM_DBG(CAM_ISP, "VFE(L) Device is NULL");
 			break;
 		}
 
 		CAM_DBG(CAM_ISP, "core id= %d, HW id %d", core_idx,
-			hw_res_l->hw_intf->hw_idx);
+			hw_res_left->hw_intf->hw_idx);
 
-		if (core_idx == hw_res_l->hw_intf->hw_idx) {
-			status = hw_res_l->bottom_half_handler(
-				hw_res_l, evt_payload);
+		if (core_idx == hw_res_left->hw_intf->hw_idx) {
+			status = hw_res_left->bottom_half_handler(
+				hw_res_left, evt_payload);
 		}
 
 		if (status)
 			break;
 
 		/* ERROR check for Right  VFE */
-		if (!hw_res_r) {
+		if (!hw_res_right) {
 			CAM_DBG(CAM_ISP, "VFE(R) Device is NULL");
 			continue;
 		}
 		CAM_DBG(CAM_ISP, "core id= %d, HW id %d", core_idx,
-			hw_res_r->hw_intf->hw_idx);
+			hw_res_right->hw_intf->hw_idx);
 
-		if (core_idx == hw_res_r->hw_intf->hw_idx) {
-			status = hw_res_r->bottom_half_handler(
-				hw_res_r, evt_payload);
+		if (core_idx == hw_res_right->hw_intf->hw_idx) {
+			status = hw_res_right->bottom_half_handler(
+				hw_res_right, evt_payload);
 		}
 
 		if (status)
@@ -4492,6 +4493,62 @@ static int cam_ife_hw_mgr_handle_reg_update(
 	return 0;
 }
 
+static int cam_ife_hw_mgr_handle_reg_update_in_bus(
+	void                              *handler_priv,
+	void                              *payload)
+{
+	struct cam_ife_hw_mgr_ctx               *ife_hwr_mgr_ctx;
+	struct cam_vfe_bus_irq_evt_payload      *evt_payload;
+	cam_hw_event_cb_func                     ife_hwr_irq_rup_cb;
+	struct cam_isp_hw_reg_update_event_data  rup_event_data;
+	uint32_t                                 core_idx;
+	struct cam_ife_hw_mgr_res               *isp_ife_out_res;
+	struct cam_isp_resource_node            *hw_res_left;
+	int                                      rup_status = -EINVAL;
+	int                                      i = 0;
+
+	CAM_DBG(CAM_ISP, "Enter");
+
+	ife_hwr_mgr_ctx = handler_priv;
+	evt_payload = payload;
+
+	if (!handler_priv || !payload) {
+		CAM_ERR(CAM_ISP, "Invalid Parameter");
+		return -EPERM;
+	}
+
+	core_idx = evt_payload->core_index;
+	ife_hwr_irq_rup_cb =
+		ife_hwr_mgr_ctx->common.event_cb[CAM_ISP_HW_EVENT_REG_UPDATE];
+
+	evt_payload->evt_id = CAM_ISP_HW_EVENT_REG_UPDATE;
+	for (i = 0; i < CAM_IFE_HW_OUT_RES_MAX; i++) {
+		isp_ife_out_res = &ife_hwr_mgr_ctx->res_list_ife_out[i];
+		if (isp_ife_out_res->res_type == CAM_IFE_HW_MGR_RES_UNINIT)
+			continue;
+
+		hw_res_left = isp_ife_out_res->hw_res[0];
+		if (hw_res_left && (evt_payload->core_index ==
+			hw_res_left->hw_intf->hw_idx))
+			rup_status = hw_res_left->bottom_half_handler(
+				hw_res_left, evt_payload);
+	}
+
+	CAM_DBG(CAM_ISP, "Exit rup_status = %d", rup_status);
+
+	if (!rup_status) {
+		if (!atomic_read(&ife_hwr_mgr_ctx->overflow_pending))
+			ife_hwr_irq_rup_cb(
+				ife_hwr_mgr_ctx->common.cb_priv,
+				CAM_ISP_HW_EVENT_REG_UPDATE,
+				&rup_event_data);
+	}
+
+	return 0;
+}
+
 static int cam_ife_hw_mgr_check_irq_for_dual_vfe(
 	struct cam_ife_hw_mgr_ctx   *ife_hw_mgr_ctx,
 	uint32_t                     core_idx0,
@@ -4548,8 +4605,8 @@ static int cam_ife_hw_mgr_handle_epoch_for_camif_hw_res(
 	void                              *payload)
 {
 	int32_t rc = -EINVAL;
-	struct cam_isp_resource_node         *hw_res_l;
-	struct cam_isp_resource_node         *hw_res_r;
+	struct cam_isp_resource_node         *hw_res_left;
+	struct cam_isp_resource_node         *hw_res_right;
 	struct cam_ife_hw_mgr_ctx            *ife_hwr_mgr_ctx;
 	struct cam_vfe_top_irq_evt_payload   *evt_payload;
 	struct cam_ife_hw_mgr_res            *isp_ife_camif_res = NULL;
@@ -4578,21 +4635,21 @@ static int cam_ife_hw_mgr_handle_epoch_for_camif_hw_res(
 			continue;
 		}
 
-		hw_res_l = isp_ife_camif_res->hw_res[0];
-		hw_res_r = isp_ife_camif_res->hw_res[1];
+		hw_res_left = isp_ife_camif_res->hw_res[0];
+		hw_res_right = isp_ife_camif_res->hw_res[1];
 
 		switch (isp_ife_camif_res->is_dual_vfe) {
 		/* Handling Single VFE Scenario */
 		case 0:
 			/* EPOCH check for Left side VFE */
-			if (!hw_res_l) {
+			if (!hw_res_left) {
 				CAM_ERR(CAM_ISP, "Left Device is NULL");
 				break;
 			}
 
-			if (core_idx == hw_res_l->hw_intf->hw_idx) {
-				epoch_status = hw_res_l->bottom_half_handler(
-					hw_res_l, evt_payload);
+			if (core_idx == hw_res_left->hw_intf->hw_idx) {
+				epoch_status = hw_res_left->bottom_half_handler(
+					hw_res_left, evt_payload);
 				if (atomic_read(
 					&ife_hwr_mgr_ctx->overflow_pending))
 					break;
@@ -4609,13 +4666,13 @@ static int cam_ife_hw_mgr_handle_epoch_for_camif_hw_res(
 		case 1:
 			/* SOF check for Left side VFE (Master)*/
 
-			if ((!hw_res_l) || (!hw_res_r)) {
+			if ((!hw_res_left) || (!hw_res_right)) {
 				CAM_ERR(CAM_ISP, "Dual VFE Device is NULL");
 				break;
 			}
-			if (core_idx == hw_res_l->hw_intf->hw_idx) {
-				epoch_status = hw_res_l->bottom_half_handler(
-					hw_res_l, evt_payload);
+			if (core_idx == hw_res_left->hw_intf->hw_idx) {
+				epoch_status = hw_res_left->bottom_half_handler(
+					hw_res_left, evt_payload);
 
 				if (!epoch_status)
 					ife_hwr_mgr_ctx->epoch_cnt[core_idx]++;
@@ -4624,9 +4681,10 @@ static int cam_ife_hw_mgr_handle_epoch_for_camif_hw_res(
 			}
 
 			/* SOF check for Right side VFE */
-			if (core_idx == hw_res_r->hw_intf->hw_idx) {
-				epoch_status = hw_res_r->bottom_half_handler(
-					hw_res_r, evt_payload);
+			if (core_idx == hw_res_right->hw_intf->hw_idx) {
+				epoch_status =
+					hw_res_right->bottom_half_handler(
+					hw_res_right, evt_payload);
 
 				if (!epoch_status)
 					ife_hwr_mgr_ctx->epoch_cnt[core_idx]++;
@@ -4634,8 +4692,8 @@ static int cam_ife_hw_mgr_handle_epoch_for_camif_hw_res(
 					break;
 			}
 
-			core_index0 = hw_res_l->hw_intf->hw_idx;
-			core_index1 = hw_res_r->hw_intf->hw_idx;
+			core_index0 = hw_res_left->hw_intf->hw_idx;
+			core_index1 = hw_res_right->hw_intf->hw_idx;
 
 			rc = cam_ife_hw_mgr_check_irq_for_dual_vfe(
 					ife_hwr_mgr_ctx,
@@ -4671,8 +4729,8 @@ static int cam_ife_hw_mgr_process_camif_sof(
 	struct cam_ife_hw_mgr_ctx            *ife_hwr_mgr_ctx,
 	struct cam_vfe_top_irq_evt_payload   *evt_payload)
 {
-	struct cam_isp_resource_node         *hw_res_l = NULL;
-	struct cam_isp_resource_node         *hw_res_r = NULL;
+	struct cam_isp_resource_node         *hw_res_left = NULL;
+	struct cam_isp_resource_node         *hw_res_right = NULL;
 	int32_t rc = -EINVAL;
 	uint32_t  core_idx;
 	uint32_t  sof_status = 0;
@@ -4681,8 +4739,8 @@ static int cam_ife_hw_mgr_process_camif_sof(
 
 	CAM_DBG(CAM_ISP, "Enter");
 	core_idx = evt_payload->core_index;
-	hw_res_l = isp_ife_camif_res->hw_res[0];
-	hw_res_r = isp_ife_camif_res->hw_res[1];
+	hw_res_left = isp_ife_camif_res->hw_res[0];
+	hw_res_right = isp_ife_camif_res->hw_res[1];
 	CAM_DBG(CAM_ISP, "is_dual_vfe ? = %d",
 		isp_ife_camif_res->is_dual_vfe);
 
@@ -4690,16 +4748,16 @@ static int cam_ife_hw_mgr_process_camif_sof(
 	/* Handling Single VFE Scenario */
 	case 0:
 		/* SOF check for Left side VFE */
-		if (!hw_res_l) {
+		if (!hw_res_left) {
 			CAM_ERR(CAM_ISP, "VFE Device is NULL");
 			break;
 		}
 		CAM_DBG(CAM_ISP, "curr_core_idx = %d,core idx hw = %d",
-			core_idx, hw_res_l->hw_intf->hw_idx);
+			core_idx, hw_res_left->hw_intf->hw_idx);
 
-		if (core_idx == hw_res_l->hw_intf->hw_idx) {
-			sof_status = hw_res_l->bottom_half_handler(hw_res_l,
-				evt_payload);
+		if (core_idx == hw_res_left->hw_intf->hw_idx) {
+			sof_status = hw_res_left->bottom_half_handler(
+				hw_res_left, evt_payload);
 			if (atomic_read(&ife_hwr_mgr_ctx->overflow_pending))
 				break;
 			if (!sof_status)
@@ -4712,17 +4770,17 @@ static int cam_ife_hw_mgr_process_camif_sof(
 	case 1:
 		/* SOF check for Left side VFE */
 
-		if (!hw_res_l) {
+		if (!hw_res_left) {
 			CAM_ERR(CAM_ISP, "VFE Device is NULL");
 			break;
 		}
 		CAM_DBG(CAM_ISP, "curr_core_idx = %d, res hw idx= %d",
 				 core_idx,
-				hw_res_l->hw_intf->hw_idx);
+				hw_res_left->hw_intf->hw_idx);
 
-		if (core_idx == hw_res_l->hw_intf->hw_idx) {
-			sof_status = hw_res_l->bottom_half_handler(
-				hw_res_l, evt_payload);
+		if (core_idx == hw_res_left->hw_intf->hw_idx) {
+			sof_status = hw_res_left->bottom_half_handler(
+				hw_res_left, evt_payload);
 			if (!sof_status)
 				ife_hwr_mgr_ctx->sof_cnt[core_idx]++;
 			else
@@ -4730,24 +4788,24 @@ static int cam_ife_hw_mgr_process_camif_sof(
 		}
 
 		/* SOF check for Right side VFE */
-		if (!hw_res_r) {
+		if (!hw_res_right) {
 			CAM_ERR(CAM_ISP, "VFE Device is NULL");
 			break;
 		}
 		CAM_DBG(CAM_ISP, "curr_core_idx = %d, ews hw idx= %d",
 				 core_idx,
-				hw_res_r->hw_intf->hw_idx);
-		if (core_idx == hw_res_r->hw_intf->hw_idx) {
-			sof_status = hw_res_r->bottom_half_handler(hw_res_r,
-				evt_payload);
+				hw_res_right->hw_intf->hw_idx);
+		if (core_idx == hw_res_right->hw_intf->hw_idx) {
+			sof_status = hw_res_right->bottom_half_handler(
+				hw_res_right, evt_payload);
 			if (!sof_status)
 				ife_hwr_mgr_ctx->sof_cnt[core_idx]++;
 			else
 				break;
 		}
 
-		core_index0 = hw_res_l->hw_intf->hw_idx;
-		core_index1 = hw_res_r->hw_intf->hw_idx;
+		core_index0 = hw_res_left->hw_intf->hw_idx;
+		core_index1 = hw_res_right->hw_intf->hw_idx;
 
 		if (atomic_read(&ife_hwr_mgr_ctx->overflow_pending))
 			break;
@@ -4867,8 +4925,8 @@ static int cam_ife_hw_mgr_handle_eof_for_camif_hw_res(
 	void                              *payload)
 {
 	int32_t rc = -EINVAL;
-	struct cam_isp_resource_node         *hw_res_l = NULL;
-	struct cam_isp_resource_node         *hw_res_r = NULL;
+	struct cam_isp_resource_node         *hw_res_left = NULL;
+	struct cam_isp_resource_node         *hw_res_right = NULL;
 	struct cam_ife_hw_mgr_ctx            *ife_hwr_mgr_ctx;
 	struct cam_vfe_top_irq_evt_payload   *evt_payload;
 	struct cam_ife_hw_mgr_res            *isp_ife_camif_res = NULL;
@@ -4901,8 +4959,8 @@ static int cam_ife_hw_mgr_handle_eof_for_camif_hw_res(
 			(isp_ife_camif_res->res_id != CAM_ISP_HW_VFE_IN_CAMIF))
 			continue;
 
-		hw_res_l = isp_ife_camif_res->hw_res[0];
-		hw_res_r = isp_ife_camif_res->hw_res[1];
+		hw_res_left = isp_ife_camif_res->hw_res[0];
+		hw_res_right = isp_ife_camif_res->hw_res[1];
 
 		CAM_DBG(CAM_ISP, "is_dual_vfe ? = %d",
 				isp_ife_camif_res->is_dual_vfe);
@@ -4910,17 +4968,17 @@ static int cam_ife_hw_mgr_handle_eof_for_camif_hw_res(
 		/* Handling Single VFE Scenario */
 		case 0:
 			/* EOF check for Left side VFE */
-			if (!hw_res_l) {
+			if (!hw_res_left) {
 				pr_err("%s: VFE Device is NULL\n",
 					__func__);
 				break;
 			}
 			CAM_DBG(CAM_ISP, "curr_core_idx = %d, core idx hw = %d",
-					core_idx, hw_res_l->hw_intf->hw_idx);
+					core_idx, hw_res_left->hw_intf->hw_idx);
 
-			if (core_idx == hw_res_l->hw_intf->hw_idx) {
-				eof_status = hw_res_l->bottom_half_handler(
-					hw_res_l, evt_payload);
+			if (core_idx == hw_res_left->hw_intf->hw_idx) {
+				eof_status = hw_res_left->bottom_half_handler(
+					hw_res_left, evt_payload);
 				if (atomic_read(
 					&ife_hwr_mgr_ctx->overflow_pending))
 					break;
@@ -4934,13 +4992,13 @@ static int cam_ife_hw_mgr_handle_eof_for_camif_hw_res(
 			break;
 		/* Handling dual VFE Scenario */
 		case 1:
-			if ((!hw_res_l) || (!hw_res_r)) {
+			if ((!hw_res_left) || (!hw_res_right)) {
 				CAM_ERR(CAM_ISP, "Dual VFE Device is NULL");
 				break;
 			}
-			if (core_idx == hw_res_l->hw_intf->hw_idx) {
-				eof_status = hw_res_l->bottom_half_handler(
-					hw_res_l, evt_payload);
+			if (core_idx == hw_res_left->hw_intf->hw_idx) {
+				eof_status = hw_res_left->bottom_half_handler(
+					hw_res_left, evt_payload);
 
 				if (!eof_status)
 					ife_hwr_mgr_ctx->eof_cnt[core_idx]++;
@@ -4949,9 +5007,9 @@ static int cam_ife_hw_mgr_handle_eof_for_camif_hw_res(
 			}
 
 			/* EOF check for Right side VFE */
-			if (core_idx == hw_res_r->hw_intf->hw_idx) {
-				eof_status = hw_res_r->bottom_half_handler(
-					hw_res_r, evt_payload);
+			if (core_idx == hw_res_right->hw_intf->hw_idx) {
+				eof_status = hw_res_right->bottom_half_handler(
+					hw_res_right, evt_payload);
 
 				if (!eof_status)
 					ife_hwr_mgr_ctx->eof_cnt[core_idx]++;
@@ -4959,8 +5017,8 @@ static int cam_ife_hw_mgr_handle_eof_for_camif_hw_res(
 					break;
 			}
 
-			core_index0 = hw_res_l->hw_intf->hw_idx;
-			core_index1 = hw_res_r->hw_intf->hw_idx;
+			core_index0 = hw_res_left->hw_intf->hw_idx;
+			core_index1 = hw_res_right->hw_intf->hw_idx;
 
 			rc = cam_ife_hw_mgr_check_irq_for_dual_vfe(
 					ife_hwr_mgr_ctx,
@@ -4999,7 +5057,7 @@ static int cam_ife_hw_mgr_handle_buf_done_for_hw_res(
 	int32_t                              i;
 	int32_t                              rc = 0;
 	cam_hw_event_cb_func                 ife_hwr_irq_wm_done_cb;
-	struct cam_isp_resource_node        *hw_res_l = NULL;
+	struct cam_isp_resource_node        *hw_res_left = NULL;
 	struct cam_ife_hw_mgr_ctx           *ife_hwr_mgr_ctx = NULL;
 	struct cam_vfe_bus_irq_evt_payload  *evt_payload = payload;
 	struct cam_ife_hw_mgr_res           *isp_ife_out_res = NULL;
@@ -5023,7 +5081,7 @@ static int cam_ife_hw_mgr_handle_buf_done_for_hw_res(
 		if (isp_ife_out_res->res_type == CAM_IFE_HW_MGR_RES_UNINIT)
 			continue;
 
-		hw_res_l = isp_ife_out_res->hw_res[0];
+		hw_res_left = isp_ife_out_res->hw_res[0];
 
 		/*
 		 * DUAL VFE: Index 0 is always a master. In case of composite
@@ -5032,10 +5090,10 @@ static int cam_ife_hw_mgr_handle_buf_done_for_hw_res(
 		 * Index zero is valid
 		 */
 
-		if (hw_res_l && (evt_payload->core_index ==
-			hw_res_l->hw_intf->hw_idx))
-			buf_done_status = hw_res_l->bottom_half_handler(
-				hw_res_l, evt_payload);
+		if (hw_res_left && (evt_payload->core_index ==
+			hw_res_left->hw_intf->hw_idx))
+			buf_done_status = hw_res_left->bottom_half_handler(
+				hw_res_left, evt_payload);
 		else
 			continue;
 
@@ -5166,9 +5224,10 @@ int cam_ife_mgr_do_tasklet(void *handler_priv, void *evt_payload_priv)
 	CAM_DBG(CAM_ISP, "addr of evt_payload = %pK core_index:%d",
 		(void *)evt_payload,
 		evt_payload->core_index);
-	CAM_DBG(CAM_ISP, "irq_status_0: = %x", evt_payload->irq_reg_val[0]);
-	CAM_DBG(CAM_ISP, "irq_status_1: = %x", evt_payload->irq_reg_val[1]);
-	CAM_DBG(CAM_ISP, "Violation register: = %x",
+	CAM_DBG(CAM_ISP,
+		"irq_status_0 = 0x%x, irq_status_1 = 0x%x, irq_status_2 = 0x%x ",
+		evt_payload->irq_reg_val[0],
+		evt_payload->irq_reg_val[1],
 		evt_payload->irq_reg_val[2]);
 
 	/*
@@ -5195,10 +5254,12 @@ int cam_ife_mgr_do_tasklet(void *handler_priv, void *evt_payload_priv)
 	cam_ife_hw_mgr_handle_sof(ife_hwr_mgr_ctx,
 		evt_payload_priv);
 
-	CAM_DBG(CAM_ISP, "Calling RUP");
-	/* REG UPDATE */
-	cam_ife_hw_mgr_handle_reg_update(ife_hwr_mgr_ctx,
+	if (evt_payload->hw_version != CAM_CPAS_TITAN_480_V100) {
+		CAM_DBG(CAM_ISP, "Calling RUP");
+		/* REG UPDATE */
+		cam_ife_hw_mgr_handle_reg_update(ife_hwr_mgr_ctx,
 		evt_payload_priv);
+	}
 
 	CAM_DBG(CAM_ISP, "Calling EPOCH");
 	/* EPOCH IRQ */
@@ -5210,6 +5271,41 @@ int cam_ife_mgr_do_tasklet(void *handler_priv, void *evt_payload_priv)
 	return IRQ_HANDLED;
 }
 
+
+int cam_ife_mgr_do_tasklet_reg_update(
+	void *handler_priv, void *evt_payload_priv)
+{
+	struct cam_ife_hw_mgr_ctx            *ife_hwr_mgr_ctx = handler_priv;
+	struct cam_vfe_bus_irq_evt_payload   *evt_payload;
+	int                                   rc = -EINVAL;
+
+	evt_payload = evt_payload_priv;
+
+	if (!evt_payload_priv || !handler_priv) {
+		CAM_ERR(CAM_ISP, "Invalid handle:%pK or event payload:%pK",
+			handler_priv, evt_payload_priv);
+		return rc;
+	}
+	ife_hwr_mgr_ctx = (struct cam_ife_hw_mgr_ctx *)handler_priv;
+
+	CAM_DBG(CAM_ISP, "addr of evt_payload = %pK core_index:%d",
+		(void *)evt_payload,
+		evt_payload->core_index);
+	CAM_DBG(CAM_ISP,
+		"bus_irq_status_0: = 0x%x, bus_irq_status_1: = 0x%x, calling RUP",
+		evt_payload->irq_reg_val[0],
+		evt_payload->irq_reg_val[1]);
+	/* REG UPDATE */
+	rc = cam_ife_hw_mgr_handle_reg_update_in_bus(ife_hwr_mgr_ctx,
+		evt_payload_priv);
+
+	if (rc)
+		CAM_ERR(CAM_ISP,
+			"Encountered Error, rc = %d", rc);
+
+	return rc;
+}
+
 static int cam_ife_hw_mgr_sort_dev_with_caps(
 	struct cam_ife_hw_mgr *ife_hw_mgr)
 {
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.h
index 5ac1510..f9e44c6 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
  */
 
 #ifndef _CAM_IFE_HW_MGR_H_
@@ -232,4 +232,16 @@ int cam_ife_mgr_do_tasklet_buf_done(void *handler_priv, void *evt_payload_priv);
  */
 int cam_ife_mgr_do_tasklet(void *handler_priv, void *evt_payload_priv);
 
+/**
+ * cam_ife_mgr_do_tasklet_reg_update()
+ *
+ * @brief:              Tasklet handle function for reg update
+ *
+ * @handler_priv:       Tasklet information handle
+ * @evt_payload_priv:   Event payload for the handler function
+ *
+ */
+int cam_ife_mgr_do_tasklet_reg_update(void *handler_priv,
+	void *evt_payload_priv);
+
 #endif /* _CAM_IFE_HW_MGR_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_vfe_hw_intf.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_vfe_hw_intf.h
index ffdf126..cde382b 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_vfe_hw_intf.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_vfe_hw_intf.h
@@ -52,7 +52,8 @@ enum cam_vfe_hw_irq_status {
 enum cam_vfe_hw_irq_regs {
 	CAM_IFE_IRQ_CAMIF_REG_STATUS0           = 0,
 	CAM_IFE_IRQ_CAMIF_REG_STATUS1           = 1,
-	CAM_IFE_IRQ_VIOLATION_STATUS            = 2,
+	CAM_IFE_IRQ_CAMIF_REG_STATUS2           = 2,
+	CAM_IFE_IRQ_VIOLATION_STATUS            = 3,
 	CAM_IFE_IRQ_REGISTERS_MAX,
 };
 
@@ -232,6 +233,7 @@ struct cam_vfe_bw_control_args {
  *                           handled
  * @error_type:              Identify different errors
  * @ts:                      Timestamp
+ * @hw_version:              CPAS hw version
  */
 struct cam_vfe_top_irq_evt_payload {
 	struct list_head           list;
@@ -241,6 +243,7 @@ struct cam_vfe_top_irq_evt_payload {
 	uint32_t                   irq_reg_val[CAM_IFE_IRQ_REGISTERS_MAX];
 	uint32_t                   error_type;
 	struct cam_isp_timestamp   ts;
+	uint32_t                   hw_version;
 };
 
 /*
@@ -285,12 +288,14 @@ struct cam_vfe_bus_irq_evt_payload {
  * @mem_base:                Mapped base address of the register space
  * @reset_complete:          Completion structure to be signaled if Reset IRQ
  *                           is Set
+ * @hw_version:              CPAS hw version
  */
 struct cam_vfe_irq_handler_priv {
 	uint32_t                     core_index;
 	void                        *core_info;
 	void __iomem                *mem_base;
 	struct completion           *reset_complete;
+	uint32_t                     hw_version;
 };
 
 /*
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.c
index c2cae97..5bd3eb2 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/delay.h>
@@ -16,16 +16,19 @@
 #include "cam_vfe_top.h"
 #include "cam_ife_hw_mgr.h"
 #include "cam_debug_util.h"
+#include "cam_cpas_api.h"
 
 static const char drv_name[] = "vfe";
 static uint32_t irq_reg_offset[CAM_IFE_IRQ_REGISTERS_MAX] = {
-	0x0000006C,
-	0x00000070,
-	0x0000007C,
+	0x00000054,
+	0x00000058,
+	0x0000005C,
+	0x00000074,
 };
 
 static uint32_t camif_irq_reg_mask[CAM_IFE_IRQ_REGISTERS_MAX] = {
-	0x00000017,
+	0x00000000,
+	0x00000007,
 	0x00000000,
 };
 
@@ -35,12 +38,14 @@ static uint32_t camif_fe_irq_reg_mask[CAM_IFE_IRQ_REGISTERS_MAX] = {
 };
 
 static uint32_t camif_irq_err_reg_mask[CAM_IFE_IRQ_REGISTERS_MAX] = {
-	0x0003FC00,
-	0xEFFF7EBC,
+	0xFBE00200,
+	0x00000000,
+	0x303FFF80,
 };
 
 static uint32_t rdi_irq_reg_mask[CAM_IFE_IRQ_REGISTERS_MAX] = {
-	0x780001e0,
+	0x38E00000,
+	0xFFF00000,
 	0x00000000,
 };
 
@@ -126,21 +131,34 @@ int cam_vfe_reset_irq_top_half(uint32_t    evt_id,
 	handler_priv = th_payload->handler_priv;
 
 	CAM_DBG(CAM_ISP, "Enter");
-	CAM_DBG(CAM_ISP, "IRQ status_0 = 0x%x", th_payload->evt_status_arr[0]);
 
-	if (th_payload->evt_status_arr[0] & (1<<31)) {
-		/*
-		 * Clear All IRQs to avoid spurious IRQs immediately
-		 * after Reset Done.
-		 */
-		cam_io_w(0xFFFFFFFF, handler_priv->mem_base + 0x64);
-		cam_io_w(0xFFFFFFFF, handler_priv->mem_base + 0x68);
-		cam_io_w(0x1, handler_priv->mem_base + 0x58);
-		CAM_DBG(CAM_ISP, "Calling Complete for RESET CMD");
-		complete(handler_priv->reset_complete);
+	/*
+	 * Clear All IRQs to avoid spurious IRQs immediately
+	 * after Reset Done.
+	 */
 
-
-		rc = 0;
+	switch (handler_priv->hw_version) {
+	case CAM_CPAS_TITAN_480_V100:
+		if (th_payload->evt_status_arr[0] & 0x1) {
+			cam_io_w(0xFFFFFFFF, handler_priv->mem_base + 0x48);
+			cam_io_w(0xFFFFFFFF, handler_priv->mem_base + 0x4C);
+			cam_io_w(0xFFFFFFFF, handler_priv->mem_base + 0x50);
+			cam_io_w(0x1, handler_priv->mem_base + 0x38);
+			CAM_DBG(CAM_ISP, "Calling Complete for RESET CMD");
+			complete(handler_priv->reset_complete);
+			rc = 0;
+		}
+		break;
+	default:
+		if (th_payload->evt_status_arr[0] & (1<<31)) {
+			cam_io_w(0xFFFFFFFF, handler_priv->mem_base + 0x64);
+			cam_io_w(0xFFFFFFFF, handler_priv->mem_base + 0x68);
+			cam_io_w(0x00000001, handler_priv->mem_base + 0x58);
+			CAM_DBG(CAM_ISP, "Calling Complete for RESET CMD");
+			complete(handler_priv->reset_complete);
+			rc = 0;
+		}
+		break;
 	}
 
 	CAM_DBG(CAM_ISP, "Exit");
@@ -378,9 +396,9 @@ int cam_vfe_deinit_hw(void *hw_priv, void *deinit_hw_args, uint32_t arg_size)
 
 int cam_vfe_reset(void *hw_priv, void *reset_core_args, uint32_t arg_size)
 {
-	struct cam_hw_info                *vfe_hw  = hw_priv;
-	struct cam_hw_soc_info            *soc_info = NULL;
-	struct cam_vfe_hw_core_info       *core_info = NULL;
+	struct cam_hw_info *vfe_hw  = hw_priv;
+	struct cam_hw_soc_info *soc_info = NULL;
+	struct cam_vfe_hw_core_info *core_info = NULL;
 	int rc;
 
 	CAM_DBG(CAM_ISP, "Enter");
@@ -396,6 +414,7 @@ int cam_vfe_reset(void *hw_priv, void *reset_core_args, uint32_t arg_size)
 	core_info->irq_payload.core_index = soc_info->index;
 	core_info->irq_payload.mem_base =
 		vfe_hw->soc_info.reg_map[VFE_CORE_BASE_IDX].mem_base;
+	core_info->irq_payload.hw_version = soc_info->hw_version;
 	core_info->irq_payload.core_info = core_info;
 	core_info->irq_payload.reset_complete = &vfe_hw->hw_complete;
 
@@ -450,8 +469,10 @@ static int cam_vfe_irq_top_half(uint32_t    evt_id,
 
 	handler_priv = th_payload->handler_priv;
 
-	CAM_DBG(CAM_ISP, "IRQ status_0 = %x", th_payload->evt_status_arr[0]);
-	CAM_DBG(CAM_ISP, "IRQ status_1 = %x", th_payload->evt_status_arr[1]);
+	for (i = 0; i < th_payload->num_registers; i++)
+		CAM_DBG(CAM_ISP, "IRQ status_%d = 0x%x",
+			i, th_payload->evt_status_arr[i]);
 
 	rc  = cam_vfe_get_evt_payload(handler_priv->core_info, &evt_payload);
 	if (rc) {
@@ -469,15 +490,16 @@ static int cam_vfe_irq_top_half(uint32_t    evt_id,
 	evt_payload->core_index = handler_priv->core_index;
 	evt_payload->core_info  = handler_priv->core_info;
 	evt_payload->evt_id  = evt_id;
+	evt_payload->hw_version = handler_priv->hw_version;
 
 	for (i = 0; i < th_payload->num_registers; i++)
 		evt_payload->irq_reg_val[i] = th_payload->evt_status_arr[i];
 
-	for (; i < CAM_IFE_IRQ_REGISTERS_MAX; i++) {
-		evt_payload->irq_reg_val[i] = cam_io_r(handler_priv->mem_base +
-			irq_reg_offset[i]);
-	}
-	CAM_DBG(CAM_ISP, "Violation status = %x", evt_payload->irq_reg_val[2]);
+	evt_payload->irq_reg_val[i] = cam_io_r(handler_priv->mem_base +
+		irq_reg_offset[i]);
+
+	CAM_DBG(CAM_ISP,
+		"Violation status = 0x%x", evt_payload->irq_reg_val[i]);
 
 	th_payload->evt_payload_priv = evt_payload;
 
@@ -572,7 +594,8 @@ int cam_vfe_start(void *hw_priv, void *start_args, uint32_t arg_size)
 	struct cam_vfe_hw_core_info       *core_info = NULL;
 	struct cam_hw_info                *vfe_hw  = hw_priv;
 	struct cam_isp_resource_node      *isp_res;
-	int rc = 0;
+	struct cam_hw_soc_info            *soc_info = NULL;
+	int                                rc = 0;
 
 	if (!hw_priv || !start_args ||
 		(arg_size != sizeof(struct cam_isp_resource_node))) {
@@ -580,9 +603,11 @@ int cam_vfe_start(void *hw_priv, void *start_args, uint32_t arg_size)
 		return -EINVAL;
 	}
 
+	soc_info = &vfe_hw->soc_info;
 	core_info = (struct cam_vfe_hw_core_info *)vfe_hw->core_info;
 	isp_res = (struct cam_isp_resource_node  *)start_args;
 	core_info->tasklet_info = isp_res->tasklet_info;
+	core_info->irq_payload.hw_version = soc_info->hw_version;
 
 	mutex_lock(&vfe_hw->hw_mutex);
 	if (isp_res->res_type == CAM_ISP_RESOURCE_VFE_IN) {
@@ -823,7 +848,8 @@ int cam_vfe_core_init(struct cam_vfe_hw_core_info  *core_info,
 		CAM_SOC_GET_REG_MAP_START(soc_info, VFE_CORE_BASE_IDX),
 		vfe_hw_info->irq_reg_info, &core_info->vfe_irq_controller);
 	if (rc) {
-		CAM_ERR(CAM_ISP, "Error! cam_irq_controller_init failed");
+		CAM_ERR(CAM_ISP,
+			"Error, cam_irq_controller_init failed rc = %d", rc);
 		return rc;
 	}
 
@@ -831,7 +857,7 @@ int cam_vfe_core_init(struct cam_vfe_hw_core_info  *core_info,
 		soc_info, hw_intf, vfe_hw_info->top_hw_info,
 		&core_info->vfe_top);
 	if (rc) {
-		CAM_ERR(CAM_ISP, "Error! cam_vfe_top_init failed");
+		CAM_ERR(CAM_ISP, "Error, cam_vfe_top_init failed rc = %d", rc);
 		goto deinit_controller;
 	}
 
@@ -840,7 +866,7 @@ int cam_vfe_core_init(struct cam_vfe_hw_core_info  *core_info,
 		vfe_hw_info->bus_hw_info, core_info->vfe_irq_controller,
 		&core_info->vfe_bus);
 	if (rc) {
-		CAM_ERR(CAM_ISP, "Error! cam_vfe_bus_init failed");
+		CAM_ERR(CAM_ISP, "Error, cam_vfe_bus_init failed rc = %d", rc);
 		goto deinit_top;
 	}
 
@@ -850,7 +876,7 @@ int cam_vfe_core_init(struct cam_vfe_hw_core_info  *core_info,
 			soc_info, hw_intf, vfe_hw_info->bus_rd_hw_info,
 			core_info->vfe_irq_controller, &core_info->vfe_rd_bus);
 		if (rc) {
-			CAM_ERR(CAM_ISP, "Error! RD cam_vfe_bus_init failed");
+			CAM_WARN(CAM_ISP, "Error, RD cam_vfe_bus_init failed");
 			rc = 0;
 		}
 		CAM_DBG(CAM_ISP, "vfe_bus_rd %pK hw_idx %d",
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_soc.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_soc.c
index ea1c04f..1d3a4f3e 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_soc.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_soc.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/slab.h>
@@ -99,7 +99,7 @@ int cam_vfe_init_soc_resources(struct cam_hw_soc_info *soc_info,
 		CAM_VFE_DSP_CLK_NAME, &soc_private->dsp_clk,
 		&soc_private->dsp_clk_index, &soc_private->dsp_clk_rate);
 	if (rc)
-		CAM_WARN(CAM_ISP, "option clk get failed");
+		CAM_WARN(CAM_ISP, "Option clk get failed with rc %d", rc);
 
 	rc = cam_vfe_request_platform_resource(soc_info, vfe_irq_handler,
 		irq_data);
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/Makefile b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/Makefile
index 5db781d..d8c2bd5 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/Makefile
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/Makefile
@@ -13,4 +13,4 @@
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw
 
-obj-$(CONFIG_SPECTRA_CAMERA) += cam_vfe17x.o
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_vfe.o
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe480.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe480.h
index 63def32..bbbafc3 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe480.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe480.h
@@ -6,11 +6,147 @@
 
 #ifndef _CAM_VFE480_H_
 #define _CAM_VFE480_H_
-
+#include "cam_vfe_camif_ver3.h"
+#include "cam_vfe_camif_ver2.h"
+#include "cam_vfe_camif_lite_ver2.h"
+#include "cam_vfe_top_ver2.h"
+#include "cam_vfe_top_ver3.h"
+#include "cam_vfe_core.h"
 #include "cam_vfe_bus_ver3.h"
 #include "cam_irq_controller.h"
 #include "cam_vfe175.h"
 
+static struct cam_irq_register_set vfe480_top_irq_reg_set[3] = {
+	{
+		.mask_reg_offset   = 0x0000003C,
+		.clear_reg_offset  = 0x00000048,
+		.status_reg_offset = 0x00000054,
+	},
+	{
+		.mask_reg_offset   = 0x00000040,
+		.clear_reg_offset  = 0x0000004C,
+		.status_reg_offset = 0x00000058,
+	},
+	{
+		.mask_reg_offset   = 0x00000044,
+		.clear_reg_offset  = 0x00000050,
+		.status_reg_offset = 0x0000005C,
+	},
+};
+
+static struct cam_irq_controller_reg_info vfe480_top_irq_reg_info = {
+	.num_registers = 3,
+	.irq_reg_set = vfe480_top_irq_reg_set,
+	.global_clear_offset  = 0x00000038,
+	.global_clear_bitmask = 0x00000001,
+};
+
+static struct cam_vfe_camif_ver3_pp_clc_reg vfe480_camif_reg = {
+	.hw_version                  = 0x00002600,
+	.hw_status                   = 0x00002604,
+	.module_cfg                  = 0x00002660,
+	.pdaf_raw_crop_width_cfg     = 0x00002668,
+	.pdaf_raw_crop_height_cfg    = 0x0000266C,
+	.line_skip_pattern           = 0x00002670,
+	.pixel_skip_pattern          = 0x00002674,
+	.period_cfg                  = 0x00002678,
+	.irq_subsample_pattern       = 0x0000267C,
+	.epoch_irq_cfg               = 0x00002680,
+	.debug_1                     = 0x000027F0,
+	.debug_0                     = 0x000027F4,
+	.test_bus_ctrl               = 0x000027F8,
+	.spare                       = 0x000027FC,
+	.reg_update_cmd              = 0x00000034,
+};
+
+static struct cam_vfe_camif_ver3_reg_data vfe_480_camif_reg_data = {
+	.pp_extern_reg_update_shift      = 4,
+	.lcr_extern_reg_update_shift     = 16,
+	.dual_pd_extern_reg_update_shift = 17,
+	.extern_reg_update_mask          = 1,
+	.pixel_pattern_shift             = 24,
+	.pixel_pattern_mask              = 0x7,
+	.dsp_mode_shift                  = 24,
+	.dsp_mode_mask                   = 0x1,
+	.dsp_en_shift                    = 23,
+	.dsp_en_mask                     = 0x1,
+	.reg_update_cmd_data             = 0x41,
+	.epoch_line_cfg                  = 0x00000014,
+	.sof_irq_mask                    = 0x00000001,
+	.epoch0_irq_mask                 = 0x00000004,
+	.epoch1_irq_mask                 = 0x00000008,
+	.reg_update_irq_mask             = 0x00000001,
+	.eof_irq_mask                    = 0x00000002,
+	.error_irq_mask0                 = 0x0003FC00,
+	.error_irq_mask2                 = 0xEFFF7E80,
+	.enable_diagnostic_hw            = 0x1,
+	.pp_camif_cfg_en_shift           = 0,
+	.pp_camif_cfg_ife_out_en_shift   = 8,
+};
+
+static struct cam_vfe_top_ver3_reg_offset_common vfe480_top_common_reg = {
+	.hw_version               = 0x00000000,
+	.titan_version            = 0x00000004,
+	.hw_capability            = 0x00000008,
+	.lens_feature             = 0x0000000C,
+	.stats_feature            = 0x00000010,
+	.color_feature            = 0x00000014,
+	.zoom_feature             = 0x00000018,
+	.global_reset_cmd         = 0x0000001C,
+	.core_cfg_0               = 0x0000002C,
+	.core_cfg_1               = 0x00000030,
+	.reg_update_cmd           = 0x00000034,
+	.violation_status         = 0x00000074,
+	.core_cgc_ovd_0           = 0x00000020,
+	.core_cgc_ovd_1           = 0x00000094,
+	.ahb_cgc_ovd              = 0x00000024,
+	.noc_cgc_ovd              = 0x00000028,
+	.trigger_cdm_events       = 0x00000090,
+	.sbi_frame_idx            = 0x00000110,
+	.dsp_status               = 0x0000007C,
+	.diag_config              = 0x00000064,
+	.diag_sensor_status_0     = 0x00000068,
+	.diag_sensor_status_1     = 0x00000098,
+};
+
+static struct cam_vfe_rdi_ver2_reg vfe480_rdi_reg = {
+	.reg_update_cmd           = 0x000004AC,
+};
+
+static struct cam_vfe_rdi_reg_data  vfe_480_rdi_0_data = {
+	.reg_update_cmd_data      = 0x2,
+	.sof_irq_mask             = 0x8000000,
+	.reg_update_irq_mask      = 0x20,
+};
+
+static struct cam_vfe_rdi_reg_data  vfe_480_rdi_1_data = {
+	.reg_update_cmd_data      = 0x4,
+	.sof_irq_mask             = 0x10000000,
+	.reg_update_irq_mask      = 0x40,
+};
+
+static struct cam_vfe_rdi_reg_data  vfe_480_rdi_2_data = {
+	.reg_update_cmd_data      = 0x8,
+	.sof_irq_mask             = 0x20000000,
+	.reg_update_irq_mask      = 0x80,
+};
+
+static struct cam_vfe_top_ver3_hw_info vfe480_top_hw_info = {
+	.common_reg = &vfe480_top_common_reg,
+	.camif_hw_info = {
+		.common_reg     = &vfe480_top_common_reg,
+		.camif_reg      = &vfe480_camif_reg,
+		.reg_data       = &vfe_480_camif_reg_data,
+	},
+	.mux_type = {
+		CAM_VFE_CAMIF_VER_3_0,
+		CAM_VFE_RDI_VER_1_0,
+		CAM_VFE_RDI_VER_1_0,
+		CAM_VFE_RDI_VER_1_0,
+		CAM_VFE_CAMIF_LITE_VER_2_0,
+	},
+};
+
 static struct cam_irq_register_set vfe480_bus_irq_reg[2] = {
 		{
 			.mask_reg_offset   = 0x0000AA18,
@@ -1028,7 +1164,7 @@ static struct cam_vfe_bus_rd_ver1_hw_info vfe480_bus_rd_hw_info = {
 };
 
 struct cam_vfe_hw_info cam_vfe480_hw_info = {
-	.irq_reg_info                  = &vfe175_top_irq_reg_info,
+	.irq_reg_info                  = &vfe480_top_irq_reg_info,
 
 	.bus_version                   = CAM_VFE_BUS_VER_3_0,
 	.bus_hw_info                   = &vfe480_bus_hw_info,
@@ -1036,15 +1172,14 @@ struct cam_vfe_hw_info cam_vfe480_hw_info = {
 	.bus_rd_version                = CAM_VFE_BUS_RD_VER_1_0,
 	.bus_rd_hw_info                = &vfe480_bus_rd_hw_info,
 
-	.top_version                   = CAM_VFE_TOP_VER_2_0,
-	.top_hw_info                   = &vfe175_top_hw_info,
+	.top_version                   = CAM_VFE_TOP_VER_3_0,
+	.top_hw_info                   = &vfe480_top_hw_info,
 
-	.camif_version                 = CAM_VFE_CAMIF_VER_2_0,
-	.camif_reg                     = &vfe175_camif_reg,
+	.camif_version                 = CAM_VFE_CAMIF_VER_3_0,
+	.camif_reg                     = &vfe480_camif_reg,
 
 	.camif_lite_version            = CAM_VFE_CAMIF_LITE_VER_2_0,
 	.camif_lite_reg                = &vfe175_camif_lite_reg,
-
 };
 
 #endif /* _CAM_VFE480_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c
index a44469d..37afbd2 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/ratelimit.h>
@@ -3252,24 +3252,31 @@ static int cam_vfe_bus_init_hw(void *hw_priv,
 		NULL,
 		NULL);
 
-	if (bus_priv->irq_handle <= 0) {
+	if ((int)bus_priv->irq_handle <= 0) {
 		CAM_ERR(CAM_ISP, "Failed to subscribe BUS IRQ");
 		return -EFAULT;
 	}
 
-	bus_priv->error_irq_handle = cam_irq_controller_subscribe_irq(
-		bus_priv->common_data.bus_irq_controller,
-		CAM_IRQ_PRIORITY_0,
-		bus_error_irq_mask,
-		bus_priv,
-		cam_vfe_bus_error_irq_top_half,
-		cam_vfe_bus_err_bottom_half,
-		bus_priv->tasklet_info,
-		&tasklet_bh_api);
+	if (bus_priv->tasklet_info != NULL) {
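+		/*
+		 * The error IRQ bottom half runs in a tasklet; subscribe it
+		 * only when a tasklet is available for this bus instance.
+		 */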
+		bus_priv->error_irq_handle = cam_irq_controller_subscribe_irq(
+			bus_priv->common_data.bus_irq_controller,
+			CAM_IRQ_PRIORITY_0,
+			bus_error_irq_mask,
+			bus_priv,
+			cam_vfe_bus_error_irq_top_half,
+			cam_vfe_bus_err_bottom_half,
+			bus_priv->tasklet_info,
+			&tasklet_bh_api);
 
-	if (bus_priv->irq_handle <= 0) {
-		CAM_ERR(CAM_ISP, "Failed to subscribe BUS IRQ");
-		return -EFAULT;
+		if ((int)bus_priv->error_irq_handle <= 0) {
+			CAM_ERR(CAM_ISP, "Failed to subscribe BUS error IRQ %d",
+				bus_priv->error_irq_handle);
+			return -EFAULT;
+		}
 	}
 
 	/*Set Debug Registers*/
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver3.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver3.c
index c0dffc4..5602e7c 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver3.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver3.c
@@ -44,6 +44,11 @@ static uint32_t bus_error_irq_mask[2] = {
 	0x00000000,
 };
 
+static uint32_t rup_irq_mask[2] = {
+	0x0000003F,
+	0x00000000,
+};
+
 enum cam_vfe_bus_ver3_packer_format {
 	PACKER_FMT_VER3_PLAIN_128,
 	PACKER_FMT_VER3_PLAIN_8,
@@ -169,6 +174,7 @@ struct cam_vfe_bus_ver3_priv {
 
 	uint32_t                            irq_handle;
 	uint32_t                            error_irq_handle;
+	uint32_t                            rup_irq_handle;
 	void                               *tasklet_info;
 };
 
@@ -221,7 +227,7 @@ static int cam_vfe_bus_ver3_put_evt_payload(void     *core_info,
 	status_reg1 = ife_irq_regs[CAM_IFE_IRQ_BUS_VER3_REG_STATUS1];
 
 	if (status_reg0 || status_reg1) {
-		CAM_DBG(CAM_ISP, "status0 0x%x status1 0x%x status2 0x%x",
+		CAM_DBG(CAM_ISP, "status0 0x%x status1 0x%x",
 			status_reg0, status_reg1);
 		return 0;
 	}
@@ -824,6 +830,76 @@ static enum cam_vfe_bus_ver3_packer_format
 	}
 }
 
+static int cam_vfe_bus_ver3_handle_rup_top_half(uint32_t evt_id,
+	struct cam_irq_th_payload *th_payload)
+{
+	int32_t                                     rc;
+	int                                         i;
+	struct cam_vfe_bus_ver3_priv               *bus_priv;
+	struct cam_vfe_bus_irq_evt_payload         *evt_payload;
+
+	bus_priv = th_payload->handler_priv;
+	if (!bus_priv) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "No resource");
+		return -ENODEV;
+	}
+
+	CAM_DBG(CAM_ISP, "bus_IRQ status_0 = 0x%x, bus_IRQ status_1 = 0x%x",
+		th_payload->evt_status_arr[0],
+		th_payload->evt_status_arr[1]);
+
+	rc  = cam_vfe_bus_ver3_get_evt_payload(&bus_priv->common_data,
+		&evt_payload);
+	if (rc) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP,
+			"No tasklet_cmd is free in queue");
+		CAM_ERR_RATE_LIMIT(CAM_ISP,
+			"IRQ status_0 = 0x%x status_1 = 0x%x",
+			th_payload->evt_status_arr[0],
+			th_payload->evt_status_arr[1]);
+
+		return rc;
+	}
+
+	evt_payload->core_index = bus_priv->common_data.core_index;
+	evt_payload->evt_id  = evt_id;
+	evt_payload->ctx = &bus_priv->common_data;
+	for (i = 0; i < th_payload->num_registers; i++)
+		evt_payload->irq_reg_val[i] = th_payload->evt_status_arr[i];
+	th_payload->evt_payload_priv = evt_payload;
+
+	return rc;
+}
+
+static int cam_vfe_bus_ver3_handle_rup_bottom_half(void *handler_priv,
+	void *evt_payload_priv)
+{
+	int                                   ret = CAM_VFE_IRQ_STATUS_ERR;
+	struct cam_vfe_bus_irq_evt_payload   *payload;
+	uint32_t                              irq_status0;
+
+	if (!handler_priv || !evt_payload_priv) {
+		CAM_ERR(CAM_ISP, "Invalid params");
+		return ret;
+	}
+
+	payload = evt_payload_priv;
+	irq_status0 = payload->irq_reg_val[CAM_IFE_IRQ_BUS_VER3_REG_STATUS0];
+
+	if (irq_status0 & 0x01) {
+		CAM_DBG(CAM_ISP, "Received REG_UPDATE_ACK");
+		ret = CAM_VFE_IRQ_STATUS_SUCCESS;
+	}
+	CAM_DBG(CAM_ISP,
+		"event ID:%d, bus_irq_status_0 = 0x%x returning status = %d",
+		payload->evt_id, irq_status0, ret);
+
+	if (ret == CAM_VFE_IRQ_STATUS_SUCCESS)
+		cam_vfe_bus_ver3_put_evt_payload(payload->ctx, &payload);
+
+	return ret;
+}
+
 static int cam_vfe_bus_ver3_acquire_wm(
 	struct cam_vfe_bus_ver3_priv          *ver3_bus_priv,
 	struct cam_isp_out_port_info          *out_port_info,
@@ -1952,7 +2028,13 @@ static int cam_vfe_bus_ver3_handle_vfe_out_done_bottom_half(
 	int rc = -EINVAL;
 	struct cam_isp_resource_node         *vfe_out = handler_priv;
 	struct cam_vfe_bus_ver3_vfe_out_data *rsrc_data = vfe_out->res_priv;
+	struct cam_vfe_bus_irq_evt_payload   *evt_payload = evt_payload_priv;
 
+	if (evt_payload->evt_id == CAM_ISP_HW_EVENT_REG_UPDATE) {
+		rc = cam_vfe_bus_ver3_handle_rup_bottom_half(
+			handler_priv, evt_payload_priv);
+		return rc;
+	}
 	/* We only handle composite buf done */
 	if (rsrc_data->comp_grp) {
 		rc = rsrc_data->comp_grp->bottom_half_handler(
@@ -2763,7 +2845,7 @@ static int cam_vfe_bus_ver3_init_hw(void *hw_priv,
 		NULL,
 		NULL);
 
-	if (bus_priv->irq_handle <= 0) {
+	if ((int)bus_priv->irq_handle <= 0) {
 		CAM_ERR(CAM_ISP, "Failed to subscribe BUS IRQ");
 		return -EFAULT;
 	}
@@ -2779,12 +2861,33 @@ static int cam_vfe_bus_ver3_init_hw(void *hw_priv,
 			bus_priv->tasklet_info,
 			&tasklet_bh_api);
 
-		if (bus_priv->error_irq_handle <= 0) {
+		if ((int)bus_priv->error_irq_handle <= 0) {
 			CAM_ERR(CAM_ISP, "Failed to subscribe BUS Error IRQ");
 			return -EFAULT;
 		}
 	}
 
+	if (bus_priv->tasklet_info != NULL) {
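+		/*
+		 * On bus ver3 the RUP status bits for the six input paths live
+		 * in the bus IRQ register (mask 0x3F), so RUP is handled here.
+		 */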
+		bus_priv->rup_irq_handle = cam_irq_controller_subscribe_irq(
+			bus_priv->common_data.bus_irq_controller,
+			CAM_IRQ_PRIORITY_0,
+			rup_irq_mask,
+			bus_priv,
+			cam_vfe_bus_ver3_handle_rup_top_half,
+			cam_ife_mgr_do_tasklet_reg_update,
+			bus_priv->tasklet_info,
+			&tasklet_bh_api);
+
+		if ((int)bus_priv->rup_irq_handle <= 0) {
+			CAM_ERR(CAM_ISP, "Failed to subscribe RUP IRQ");
+			return -EFAULT;
+		}
+	}
+
 	// no clock gating at bus input
 	cam_io_w_mb(0xFFFFF, bus_priv->common_data.mem_base +
 		bus_priv->common_data.common_reg->cgc_ovd);
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/Makefile b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/Makefile
index 97ef544..373d6fc 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/Makefile
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/Makefile
@@ -12,6 +12,6 @@
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/include
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw
 
-obj-$(CONFIG_SPECTRA_CAMERA) += cam_vfe_camif_lite_ver2.o
-obj-$(CONFIG_SPECTRA_CAMERA) += cam_vfe_top.o cam_vfe_top_ver2.o cam_vfe_camif_ver2.o cam_vfe_rdi.o
-obj-$(CONFIG_SPECTRA_CAMERA) += cam_vfe_fe_ver1.o
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_vfe_camif_lite_ver2.o cam_vfe_top.o
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_vfe_top_ver3.o cam_vfe_top_ver2.o cam_vfe_camif_ver2.o
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_vfe_camif_ver3.o cam_vfe_rdi.o cam_vfe_fe_ver1.o
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_lite_ver3.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_lite_ver3.h
new file mode 100644
index 0000000..3a2bca0
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_lite_ver3.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_VFE_CAMIF_LITE_VER3_H_
+#define _CAM_VFE_CAMIF_LITE_VER3_H_
+
+#include "cam_isp_hw.h"
+#include "cam_vfe_top.h"
+
+struct cam_vfe_camif_lite_ver3_reg {
+	uint32_t     lite_hw_version;
+	uint32_t     lite_hw_status;
+	uint32_t     lite_module_config;
+	uint32_t     lite_skip_period;
+	uint32_t     lite_irq_subsample_pattern;
+	uint32_t     lite_epoch_irq;
+	uint32_t     lite_debug_1;
+	uint32_t     lite_debug_0;
+	uint32_t     lite_test_bus_ctrl;
+	uint32_t     camif_lite_spare;
+	uint32_t     reg_update_cmd;
+};
+
+struct cam_vfe_camif_lite_ver3_reg_data {
+	uint32_t     extern_reg_update_shift;
+	uint32_t     reg_update_cmd_data;
+	uint32_t     epoch_line_cfg;
+	uint32_t     sof_irq_mask;
+	uint32_t     epoch0_irq_mask;
+	uint32_t     epoch1_irq_mask;
+	uint32_t     eof_irq_mask;
+	uint32_t     error_irq_mask0;
+	uint32_t     error_irq_mask2;
+	uint32_t     enable_diagnostic_hw;
+};
+
+struct cam_vfe_camif_lite_ver3_hw_info {
+	struct cam_vfe_top_ver3_reg_offset_common   *common_reg;
+	struct cam_vfe_camif_lite_ver3_reg          *camif_lite_reg;
+	struct cam_vfe_camif_lite_ver3_reg_data     *reg_data;
+};
+
+int cam_vfe_camif_lite_ver3_acquire_resource(
+	struct cam_isp_resource_node          *camif_lite_res,
+	void                                  *acquire_param);
+
+int cam_vfe_camif_lite_ver3_init(
+	struct cam_hw_intf            *hw_intf,
+	struct cam_hw_soc_info        *soc_info,
+	void                          *camif_lite_hw_info,
+	struct cam_isp_resource_node  *camif_lite_node);
+
+int cam_vfe_camif_lite_ver3_deinit(
+	struct cam_isp_resource_node  *camif_node);
+
+#endif /* _CAM_VFE_CAMIF_LITE_VER3_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver3.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver3.c
new file mode 100644
index 0000000..c165115
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver3.c
@@ -0,0 +1,711 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/slab.h>
+#include <uapi/media/cam_isp.h>
+#include "cam_io_util.h"
+#include "cam_isp_hw_mgr_intf.h"
+#include "cam_isp_hw.h"
+#include "cam_vfe_hw_intf.h"
+#include "cam_vfe_soc.h"
+#include "cam_vfe_top.h"
+#include "cam_vfe_top_ver3.h"
+#include "cam_vfe_camif_ver3.h"
+#include "cam_debug_util.h"
+#include "cam_cdm_util.h"
+#include "cam_cpas_api.h"
+
+#define CAM_VFE_CAMIF_IRQ_SOF_DEBUG_CNT_MAX 2
+
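+/*
+ * Private data for one CAMIF ver3 mux: its register blocks, the settings
+ * captured at acquire time, and SOF IRQ debug bookkeeping.
+ */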
+struct cam_vfe_mux_camif_ver3_data {
+	void __iomem                                *mem_base;
+	struct cam_hw_intf                          *hw_intf;
+	struct cam_vfe_camif_ver3_pp_clc_reg        *camif_reg;
+	struct cam_vfe_top_ver3_reg_offset_common   *common_reg;
+	struct cam_vfe_camif_ver3_reg_data          *reg_data;
+	struct cam_hw_soc_info                      *soc_info;
+
+	enum cam_isp_hw_sync_mode          sync_mode;
+	uint32_t                           dsp_mode;
+	uint32_t                           pix_pattern;
+	uint32_t                           first_pixel;
+	uint32_t                           first_line;
+	uint32_t                           last_pixel;
+	uint32_t                           last_line;
+	bool                               enable_sof_irq_debug;
+	uint32_t                           irq_debug_cnt;
+	uint32_t                           camif_debug;
+};
+
+static int cam_vfe_camif_ver3_validate_pix_pattern(uint32_t pattern)
+{
+	int rc;
+
+	switch (pattern) {
+	case CAM_ISP_PATTERN_BAYER_RGRGRG:
+	case CAM_ISP_PATTERN_BAYER_GRGRGR:
+	case CAM_ISP_PATTERN_BAYER_BGBGBG:
+	case CAM_ISP_PATTERN_BAYER_GBGBGB:
+	case CAM_ISP_PATTERN_YUV_YCBYCR:
+	case CAM_ISP_PATTERN_YUV_YCRYCB:
+	case CAM_ISP_PATTERN_YUV_CBYCRY:
+	case CAM_ISP_PATTERN_YUV_CRYCBY:
+		rc = 0;
+		break;
+	default:
+		CAM_ERR(CAM_ISP, "Error, Invalid pix pattern:%d", pattern);
+		rc = -EINVAL;
+		break;
+	}
+	return rc;
+}
+
+static int cam_vfe_camif_ver3_get_reg_update(
+	struct cam_isp_resource_node  *camif_res,
+	void *cmd_args, uint32_t arg_size)
+{
+	uint32_t                           size = 0;
+	uint32_t                           reg_val_pair[2];
+	struct cam_isp_hw_get_cmd_update   *cdm_args = cmd_args;
+	struct cam_cdm_utils_ops           *cdm_util_ops = NULL;
+	struct cam_vfe_mux_camif_ver3_data *rsrc_data = NULL;
+
+	if (arg_size != sizeof(struct cam_isp_hw_get_cmd_update)) {
+		CAM_ERR(CAM_ISP, "Invalid arg size: %u expected:%zu",
+			arg_size, sizeof(struct cam_isp_hw_get_cmd_update));
+		return -EINVAL;
+	}
+
+	if (!cdm_args || !cdm_args->res) {
+		CAM_ERR(CAM_ISP, "Invalid args: %pK", cdm_args);
+		return -EINVAL;
+	}
+
+	cdm_util_ops = (struct cam_cdm_utils_ops *)cdm_args->res->cdm_ops;
+
+	if (!cdm_util_ops) {
+		CAM_ERR(CAM_ISP, "Invalid CDM ops");
+		return -EINVAL;
+	}
+
+	size = cdm_util_ops->cdm_required_size_reg_random(1);
+	/* since cdm returns dwords, we need to convert it into bytes */
+	if ((size * 4) > cdm_args->cmd.size) {
+		CAM_ERR(CAM_ISP, "buf size:%d is not sufficient, expected: %d",
+			cdm_args->cmd.size, (size*4));
+		return -EINVAL;
+	}
+
+	rsrc_data = camif_res->res_priv;
+	reg_val_pair[0] = rsrc_data->camif_reg->reg_update_cmd;
+	reg_val_pair[1] = rsrc_data->reg_data->reg_update_cmd_data;
+	CAM_DBG(CAM_ISP, "CAMIF reg_update_cmd 0x%x offset 0x%x",
+		reg_val_pair[1], reg_val_pair[0]);
+
+	cdm_util_ops->cdm_write_regrandom(cdm_args->cmd.cmd_buf_addr,
+		1, reg_val_pair);
+
+	cdm_args->cmd.used_bytes = size * 4;
+
+	return 0;
+}
+
+int cam_vfe_camif_ver3_acquire_resource(
+	struct cam_isp_resource_node  *camif_res,
+	void                          *acquire_param)
+{
+	struct cam_vfe_mux_camif_ver3_data    *camif_data;
+	struct cam_vfe_acquire_args           *acquire_data;
+	int                                    rc = 0;
+
+	camif_data  = (struct cam_vfe_mux_camif_ver3_data *)
+		camif_res->res_priv;
+	acquire_data = (struct cam_vfe_acquire_args *)acquire_param;
+
+	rc = cam_vfe_camif_ver3_validate_pix_pattern(
+		acquire_data->vfe_in.in_port->test_pattern);
+
+	if (rc) {
+		CAM_ERR(CAM_ISP, "Validate pix pattern failed, rc = %d", rc);
+		return rc;
+	}
+
+	camif_data->sync_mode   = acquire_data->vfe_in.sync_mode;
+	camif_data->pix_pattern = acquire_data->vfe_in.in_port->test_pattern;
+	camif_data->dsp_mode    = acquire_data->vfe_in.in_port->dsp_mode;
+	camif_data->first_pixel = acquire_data->vfe_in.in_port->left_start;
+	camif_data->last_pixel  = acquire_data->vfe_in.in_port->left_stop;
+	camif_data->first_line  = acquire_data->vfe_in.in_port->line_start;
+	camif_data->last_line   = acquire_data->vfe_in.in_port->line_stop;
+
+	CAM_DBG(CAM_ISP, "hw id:%d pix_pattern:%d dsp_mode=%d",
+		camif_res->hw_intf->hw_idx,
+		camif_data->pix_pattern, camif_data->dsp_mode);
+
+	return rc;
+}
+
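+/*
+ * Stream setup: enable the optional DSP clock and override (disable) HW
+ * clock gating across the core, AHB and NOC domains.
+ */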
+static int cam_vfe_camif_ver3_resource_init(
+	struct cam_isp_resource_node *camif_res,
+	void *init_args, uint32_t arg_size)
+{
+	struct cam_vfe_mux_camif_ver3_data    *camif_data;
+	struct cam_hw_soc_info                *soc_info;
+	int                                    rc = 0;
+
+	if (!camif_res) {
+		CAM_ERR(CAM_ISP, "Error Invalid input arguments");
+		return -EINVAL;
+	}
+
+	camif_data = (struct cam_vfe_mux_camif_ver3_data *)
+		camif_res->res_priv;
+
+	soc_info = camif_data->soc_info;
+
+	if ((camif_data->dsp_mode >= CAM_ISP_DSP_MODE_ONE_WAY) &&
+		(camif_data->dsp_mode <= CAM_ISP_DSP_MODE_ROUND)) {
+		rc = cam_vfe_soc_enable_clk(soc_info, CAM_VFE_DSP_CLK_NAME);
+		if (rc)
+			CAM_ERR(CAM_ISP,
+				"failed to enable dsp clk, rc = %d", rc);
+	}
+
+	/* All auto clock gating disabled by default */
+	CAM_INFO(CAM_ISP, "overriding clock gating");
+	cam_io_w_mb(0xFFFFFFFF, camif_data->mem_base +
+		camif_data->common_reg->core_cgc_ovd_0);
+
+	cam_io_w_mb(0xFF, camif_data->mem_base +
+		camif_data->common_reg->core_cgc_ovd_1);
+
+	cam_io_w_mb(0x1, camif_data->mem_base +
+		camif_data->common_reg->ahb_cgc_ovd);
+
+	cam_io_w_mb(0x1, camif_data->mem_base +
+		camif_data->common_reg->noc_cgc_ovd);
+
+	return rc;
+}
+
+static int cam_vfe_camif_ver3_resource_deinit(
+	struct cam_isp_resource_node        *camif_res,
+	void *init_args, uint32_t arg_size)
+{
+	struct cam_vfe_mux_camif_ver3_data    *camif_data;
+	struct cam_hw_soc_info           *soc_info;
+	int rc = 0;
+
+	if (!camif_res) {
+		CAM_ERR(CAM_ISP, "Error Invalid input arguments");
+		return -EINVAL;
+	}
+
+	camif_data   = (struct cam_vfe_mux_camif_ver3_data *)
+		camif_res->res_priv;
+
+	soc_info = camif_data->soc_info;
+
+	if ((camif_data->dsp_mode >= CAM_ISP_DSP_MODE_ONE_WAY) &&
+		(camif_data->dsp_mode <= CAM_ISP_DSP_MODE_ROUND)) {
+		rc = cam_vfe_soc_disable_clk(soc_info, CAM_VFE_DSP_CLK_NAME);
+		if (rc)
+			CAM_ERR(CAM_ISP, "failed to disable dsp clk");
+	}
+
+	return rc;
+}
+
+static int cam_vfe_camif_ver3_resource_start(
+	struct cam_isp_resource_node *camif_res)
+{
+	struct cam_vfe_mux_camif_ver3_data  *rsrc_data;
+	uint32_t                             val = 0;
+	uint32_t                             epoch0_line_cfg;
+	uint32_t                             epoch1_line_cfg;
+	uint32_t                             computed_epoch_line_cfg;
+	struct cam_vfe_soc_private          *soc_private;
+
+	if (!camif_res) {
+		CAM_ERR(CAM_ISP, "Error, Invalid input arguments");
+		return -EINVAL;
+	}
+
+	if (camif_res->res_state != CAM_ISP_RESOURCE_STATE_RESERVED) {
+		CAM_ERR(CAM_ISP, "Error, Invalid camif res res_state:%d",
+			camif_res->res_state);
+		return -EINVAL;
+	}
+
+	rsrc_data = (struct cam_vfe_mux_camif_ver3_data *)camif_res->res_priv;
+
+	soc_private = rsrc_data->soc_info->soc_private;
+
+	if (!soc_private) {
+		CAM_ERR(CAM_ISP, "Error, soc_private NULL");
+		return -ENODEV;
+	}
+
+	/* config vfe core */
+	val = (rsrc_data->pix_pattern <<
+		rsrc_data->reg_data->pixel_pattern_shift);
+	val |= (1 << rsrc_data->reg_data->pp_camif_cfg_en_shift);
+	val |= (1 << rsrc_data->reg_data->pp_camif_cfg_ife_out_en_shift);
+	cam_io_w_mb(val,
+		rsrc_data->mem_base + rsrc_data->camif_reg->module_cfg);
+	CAM_DBG(CAM_ISP, "write module_cfg val = 0x%x", val);
+	val = 0x0;
+
+	/* AF stitching by hw disabled by default
+	 * PP CAMIF currently operates only in offline mode
+	 */
+
+	if ((rsrc_data->dsp_mode >= CAM_ISP_DSP_MODE_ONE_WAY) &&
+		(rsrc_data->dsp_mode <= CAM_ISP_DSP_MODE_ROUND)) {
+		/* DSP mode reg val is CAM_ISP_DSP_MODE - 1 */
+		val |= (((rsrc_data->dsp_mode - 1) &
+			rsrc_data->reg_data->dsp_mode_mask) <<
+			rsrc_data->reg_data->dsp_mode_shift);
+		val |= (0x1 << rsrc_data->reg_data->dsp_en_shift);
+	}
+
+	if (rsrc_data->sync_mode == CAM_ISP_HW_SYNC_SLAVE)
+		val |= (1 << rsrc_data->reg_data->pp_extern_reg_update_shift);
+
+	cam_io_w_mb(val,
+		rsrc_data->mem_base + rsrc_data->common_reg->core_cfg_0);
+
+	/* epoch config */
+	switch (soc_private->cpas_version) {
+	case CAM_CPAS_TITAN_480_V100:
+		epoch0_line_cfg = ((rsrc_data->last_line -
+			rsrc_data->first_line) / 4) +
+			rsrc_data->first_line;
+		/* epoch line cfg will still be configured at the midpoint of
+		 * the frame width. We use '/ 4' instead of '/ 2' because
+		 * this is a multipixel path.
+		 */
+		epoch1_line_cfg = rsrc_data->reg_data->epoch_line_cfg &
+			0xFFFF;
+		computed_epoch_line_cfg = (epoch1_line_cfg << 16) |
+			epoch0_line_cfg;
+		cam_io_w_mb(computed_epoch_line_cfg,
+			rsrc_data->mem_base +
+			rsrc_data->camif_reg->epoch_irq_cfg);
+		CAM_DBG(CAM_ISP, "epoch_line_cfg: 0x%x",
+			computed_epoch_line_cfg);
+		break;
+	default:
+		CAM_ERR(CAM_ISP, "Unsupported cpas version: 0x%x",
+			soc_private->cpas_version);
+		return -EINVAL;
+	}
+
+	camif_res->res_state = CAM_ISP_RESOURCE_STATE_STREAMING;
+
+	/* Reg Update */
+	cam_io_w_mb(rsrc_data->reg_data->reg_update_cmd_data,
+		rsrc_data->mem_base + rsrc_data->camif_reg->reg_update_cmd);
+	CAM_DBG(CAM_ISP, "hw id:%d RUP val:0x%x", camif_res->hw_intf->hw_idx,
+		rsrc_data->reg_data->reg_update_cmd_data);
+
+	/* disable sof irq debug flag */
+	rsrc_data->enable_sof_irq_debug = false;
+	rsrc_data->irq_debug_cnt = 0;
+
+	if (rsrc_data->camif_debug &
+		CAMIF_DEBUG_ENABLE_SENSOR_DIAG_STATUS) {
+		val = cam_io_r_mb(rsrc_data->mem_base +
+			rsrc_data->common_reg->diag_config);
+		val |= rsrc_data->reg_data->enable_diagnostic_hw;
+		cam_io_w_mb(val, rsrc_data->mem_base +
+			rsrc_data->common_reg->diag_config);
+	}
+
+	return 0;
+}
+
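+/*
+ * Dump scaler/crop and bus WM registers plus the CAMNOC MaxWR levels;
+ * called from the error path to aid overflow debug.
+ */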
+static int cam_vfe_camif_ver3_reg_dump(
+	struct cam_vfe_mux_camif_ver3_data *camif_priv)
+{
+	uint32_t val = 0, wm_idx, offset;
+	int i = 0;
+
+	for (i = 0xA3C; i <= 0xA90; i += 8) {
+		CAM_INFO(CAM_ISP,
+			"SCALING offset 0x%x val 0x%x offset 0x%x val 0x%x",
+			i, cam_io_r_mb(camif_priv->mem_base + i), i + 4,
+			cam_io_r_mb(camif_priv->mem_base + i + 4));
+	}
+
+	for (i = 0xE0C; i <= 0xE3C; i += 4) {
+		val = cam_io_r_mb(camif_priv->mem_base + i);
+		CAM_INFO(CAM_ISP, "offset 0x%x val 0x%x", i, val);
+	}
+
+	for (wm_idx = 0; wm_idx <= 25; wm_idx++) {
+		offset = 0xAC00 + 0x100 * wm_idx;
+		CAM_INFO(CAM_ISP,
+			"BUS_WM%u offset 0x%x val 0x%x offset 0x%x val 0x%x",
+			wm_idx, offset,
+			cam_io_r_mb(camif_priv->mem_base + offset),
+			offset + 4, cam_io_r_mb(camif_priv->mem_base +
+			offset + 4), offset + 8,
+			cam_io_r_mb(camif_priv->mem_base + offset + 8),
+			offset + 12, cam_io_r_mb(camif_priv->mem_base +
+			offset + 12));
+	}
+
+	offset = 0x420;
+	val = cam_soc_util_r(camif_priv->soc_info, 1, offset);
+	CAM_INFO(CAM_ISP, "CAMNOC IFE02 MaxWR_LOW offset 0x%x value 0x%x",
+		offset, val);
+
+	offset = 0x820;
+	val = cam_soc_util_r(camif_priv->soc_info, 1, offset);
+	CAM_INFO(CAM_ISP, "CAMNOC IFE13 MaxWR_LOW offset 0x%x value 0x%x",
+		offset, val);
+
+	return 0;
+}
+
+static int cam_vfe_camif_ver3_reg_dump_bh(
+	struct cam_isp_resource_node *camif_res)
+{
+	struct cam_vfe_mux_camif_ver3_data *camif_priv;
+	struct cam_vfe_soc_private *soc_private;
+	uint32_t offset, val, wm_idx;
+
+	if (!camif_res) {
+		CAM_ERR(CAM_ISP, "Error, Invalid input arguments");
+		return -EINVAL;
+	}
+
+	if ((camif_res->res_state == CAM_ISP_RESOURCE_STATE_RESERVED) ||
+		(camif_res->res_state == CAM_ISP_RESOURCE_STATE_AVAILABLE))
+		return 0;
+
+	camif_priv = (struct cam_vfe_mux_camif_ver3_data *)camif_res->res_priv;
+	for (offset = 0x0; offset < 0x1000; offset += 0x4) {
+		val = cam_soc_util_r(camif_priv->soc_info, 0, offset);
+		CAM_DBG(CAM_ISP, "offset 0x%x value 0x%x", offset, val);
+	}
+
+	for (offset = 0xAA00; offset <= 0xAADC; offset += 0x4) {
+		val = cam_soc_util_r(camif_priv->soc_info, 0, offset);
+		CAM_DBG(CAM_ISP, "offset 0x%x value 0x%x", offset, val);
+	}
+
+	for (wm_idx = 0; wm_idx <= 25; wm_idx++) {
+		for (offset = 0xAC00 + 0x100 * wm_idx;
+			offset < 0xAC84 + 0x100 * wm_idx; offset += 0x4) {
+			val = cam_soc_util_r(camif_priv->soc_info, 0, offset);
+			CAM_DBG(CAM_ISP,
+				"offset 0x%x value 0x%x", offset, val);
+		}
+	}
+
+	soc_private = camif_priv->soc_info->soc_private;
+	if (soc_private->cpas_version == CAM_CPAS_TITAN_175_V120) {
+		cam_cpas_reg_read(soc_private->cpas_handle[0],
+			CAM_CPAS_REG_CAMNOC, 0x3A20, true, &val);
+		CAM_DBG(CAM_ISP, "IFE0_nRDI_MAXWR_LOW offset 0x3A20 val 0x%x",
+			val);
+
+		cam_cpas_reg_read(soc_private->cpas_handle[0],
+			CAM_CPAS_REG_CAMNOC, 0x5420, true, &val);
+		CAM_DBG(CAM_ISP, "IFE1_nRDI_MAXWR_LOW offset 0x5420 val 0x%x",
+			val);
+
+		cam_cpas_reg_read(soc_private->cpas_handle[1],
+			CAM_CPAS_REG_CAMNOC, 0x3620, true, &val);
+		CAM_DBG(CAM_ISP,
+			"IFE0123_RDI_WR_MAXWR_LOW offset 0x3620 val 0x%x", val);
+	} else {
+		cam_cpas_reg_read(soc_private->cpas_handle[0],
+			CAM_CPAS_REG_CAMNOC, 0x420, true, &val);
+		CAM_DBG(CAM_ISP, "IFE02_MAXWR_LOW offset 0x420 val 0x%x", val);
+
+		cam_cpas_reg_read(soc_private->cpas_handle[0],
+			CAM_CPAS_REG_CAMNOC, 0x820, true, &val);
+		CAM_DBG(CAM_ISP, "IFE13_MAXWR_LOW offset 0x820 val 0x%x", val);
+	}
+
+	return 0;
+}
+
+static int cam_vfe_camif_ver3_resource_stop(
+	struct cam_isp_resource_node *camif_res)
+{
+	struct cam_vfe_mux_camif_ver3_data        *camif_priv;
+	struct cam_vfe_camif_ver3_pp_clc_reg      *camif_reg;
+	int                                        rc = 0;
+	uint32_t                                   val = 0;
+
+	if (!camif_res) {
+		CAM_ERR(CAM_ISP, "Error, Invalid input arguments");
+		return -EINVAL;
+	}
+
+	if ((camif_res->res_state == CAM_ISP_RESOURCE_STATE_RESERVED) ||
+		(camif_res->res_state == CAM_ISP_RESOURCE_STATE_AVAILABLE))
+		return 0;
+
+	camif_priv = (struct cam_vfe_mux_camif_ver3_data *)camif_res->res_priv;
+	camif_reg = camif_priv->camif_reg;
+
+	if ((camif_priv->dsp_mode >= CAM_ISP_DSP_MODE_ONE_WAY) &&
+		(camif_priv->dsp_mode <= CAM_ISP_DSP_MODE_ROUND)) {
+		val = cam_io_r_mb(camif_priv->mem_base +
+			camif_priv->common_reg->core_cfg_0);
+		val &= (~(1 << camif_priv->reg_data->dsp_en_shift));
+		cam_io_w_mb(val, camif_priv->mem_base +
+			camif_priv->common_reg->core_cfg_0);
+	}
+
+	if (camif_res->res_state == CAM_ISP_RESOURCE_STATE_STREAMING)
+		camif_res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
+
+	val = cam_io_r_mb(camif_priv->mem_base +
+		camif_priv->common_reg->diag_config);
+	if (val & camif_priv->reg_data->enable_diagnostic_hw) {
+		val &= ~camif_priv->reg_data->enable_diagnostic_hw;
+		cam_io_w_mb(val, camif_priv->mem_base +
+			camif_priv->common_reg->diag_config);
+	}
+
+	return rc;
+}
+
+static int cam_vfe_camif_ver3_sof_irq_debug(
+	struct cam_isp_resource_node *rsrc_node, void *cmd_args)
+{
+	struct cam_vfe_mux_camif_ver3_data *camif_priv;
+	uint32_t *enable_sof_irq = (uint32_t *)cmd_args;
+
+	camif_priv =
+		(struct cam_vfe_mux_camif_ver3_data *)rsrc_node->res_priv;
+
+	if (*enable_sof_irq == 1)
+		camif_priv->enable_sof_irq_debug = true;
+	else
+		camif_priv->enable_sof_irq_debug = false;
+
+	return 0;
+}
+
+static int cam_vfe_camif_ver3_process_cmd(
+	struct cam_isp_resource_node *rsrc_node,
+	uint32_t cmd_type, void *cmd_args, uint32_t arg_size)
+{
+	int rc = -EINVAL;
+	struct cam_vfe_mux_camif_ver3_data *camif_priv = NULL;
+
+	if (!rsrc_node || !cmd_args) {
+		CAM_ERR(CAM_ISP,
+			"Invalid input arguments resource node:%pK cmd_args:%pK",
+			rsrc_node, cmd_args);
+		return -EINVAL;
+	}
+
+	switch (cmd_type) {
+	case CAM_ISP_HW_CMD_GET_REG_UPDATE:
+		rc = cam_vfe_camif_ver3_get_reg_update(rsrc_node, cmd_args,
+			arg_size);
+		break;
+	case CAM_ISP_HW_CMD_GET_REG_DUMP:
+		rc = cam_vfe_camif_ver3_reg_dump_bh(rsrc_node);
+		break;
+	case CAM_ISP_HW_CMD_SOF_IRQ_DEBUG:
+		rc = cam_vfe_camif_ver3_sof_irq_debug(rsrc_node, cmd_args);
+		break;
+	case CAM_ISP_HW_CMD_SET_CAMIF_DEBUG:
+		camif_priv = (struct cam_vfe_mux_camif_ver3_data *)
+			rsrc_node->res_priv;
+		camif_priv->camif_debug = *((uint32_t *)cmd_args);
+		break;
+	default:
+		CAM_ERR(CAM_ISP,
+			"unsupported process command:%d", cmd_type);
+		break;
+	}
+
+	return rc;
+}
+
+static int cam_vfe_camif_ver3_handle_irq_top_half(uint32_t evt_id,
+	struct cam_irq_th_payload *th_payload)
+{
+	return -EPERM;
+}
+
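+/*
+ * Classify the IRQ status words into SOF/EPOCH/EOF/ERROR results for the
+ * caller; a detected error also triggers a register dump.
+ */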
+static int cam_vfe_camif_ver3_handle_irq_bottom_half(void *handler_priv,
+	void *evt_payload_priv)
+{
+	int                                   ret = CAM_VFE_IRQ_STATUS_ERR;
+	struct cam_isp_resource_node         *camif_node;
+	struct cam_vfe_mux_camif_ver3_data   *camif_priv;
+	struct cam_vfe_top_irq_evt_payload   *payload;
+	uint32_t                              irq_status0;
+	uint32_t                              irq_status1;
+	uint32_t                              irq_status2;
+	uint32_t                              val;
+
+	if (!handler_priv || !evt_payload_priv) {
+		CAM_ERR(CAM_ISP,
+			"Invalid params handler_priv:%pK, evt_payload_priv:%pK",
+			handler_priv, evt_payload_priv);
+		return ret;
+	}
+
+	camif_node = handler_priv;
+	camif_priv = camif_node->res_priv;
+	payload = evt_payload_priv;
+	irq_status0 = payload->irq_reg_val[CAM_IFE_IRQ_CAMIF_REG_STATUS0];
+	irq_status1 = payload->irq_reg_val[CAM_IFE_IRQ_CAMIF_REG_STATUS1];
+	/* the third status word (index 2) carries the error bits */
+	irq_status2 = payload->irq_reg_val[2];
+
+	CAM_DBG(CAM_ISP,
+		"evt_id:%d, irq_status0:0x%x, irq_status1:0x%x, irq_status2:0x%x",
+		payload->evt_id, irq_status0, irq_status1, irq_status2);
+
+	switch (payload->evt_id) {
+	case CAM_ISP_HW_EVENT_SOF:
+		if (irq_status1 & camif_priv->reg_data->sof_irq_mask) {
+			if ((camif_priv->enable_sof_irq_debug) &&
+				(camif_priv->irq_debug_cnt <=
+				CAM_VFE_CAMIF_IRQ_SOF_DEBUG_CNT_MAX)) {
+				CAM_INFO_RATE_LIMIT(CAM_ISP, "Received SOF");
+
+				camif_priv->irq_debug_cnt++;
+				if (camif_priv->irq_debug_cnt ==
+					CAM_VFE_CAMIF_IRQ_SOF_DEBUG_CNT_MAX) {
+					camif_priv->enable_sof_irq_debug =
+						false;
+					camif_priv->irq_debug_cnt = 0;
+				}
+			} else {
+				CAM_DBG(CAM_ISP, "Received SOF");
+			}
+			ret = CAM_VFE_IRQ_STATUS_SUCCESS;
+		}
+		break;
+	case CAM_ISP_HW_EVENT_EPOCH:
+		if (irq_status1 & camif_priv->reg_data->epoch0_irq_mask) {
+			CAM_DBG(CAM_ISP, "Received EPOCH");
+			ret = CAM_VFE_IRQ_STATUS_SUCCESS;
+		}
+		break;
+	case CAM_ISP_HW_EVENT_EOF:
+		if (irq_status1 & camif_priv->reg_data->eof_irq_mask) {
+			CAM_DBG(CAM_ISP, "Received EOF");
+			ret = CAM_VFE_IRQ_STATUS_SUCCESS;
+		}
+		break;
+	case CAM_ISP_HW_EVENT_ERROR:
+		if (irq_status2 & camif_priv->reg_data->error_irq_mask2) {
+			CAM_DBG(CAM_ISP, "Received ERROR");
+			ret = CAM_ISP_HW_ERROR_OVERFLOW;
+			cam_vfe_camif_ver3_reg_dump(camif_node->res_priv);
+		} else {
+			ret = CAM_ISP_HW_ERROR_NONE;
+		}
+
+		if (camif_priv->camif_debug &
+			CAMIF_DEBUG_ENABLE_SENSOR_DIAG_STATUS) {
+			val = cam_io_r(camif_priv->mem_base +
+				camif_priv->common_reg->diag_sensor_status_0);
+			CAM_DBG(CAM_ISP, "VFE_DIAG_SENSOR_STATUS: 0x%x",
+				val);
+		}
+		break;
+	default:
+		break;
+	}
+
+	CAM_DBG(CAM_ISP, "returning status = %d", ret);
+	return ret;
+}
+
+int cam_vfe_camif_ver3_init(
+	struct cam_hw_intf            *hw_intf,
+	struct cam_hw_soc_info        *soc_info,
+	void                          *camif_hw_info,
+	struct cam_isp_resource_node  *camif_node)
+{
+	struct cam_vfe_mux_camif_ver3_data *camif_priv = NULL;
+	struct cam_vfe_camif_ver3_hw_info *camif_info = camif_hw_info;
+
+	camif_priv = kzalloc(sizeof(struct cam_vfe_mux_camif_ver3_data),
+		GFP_KERNEL);
+	if (!camif_priv)
+		return -ENOMEM;
+
+	camif_node->res_priv = camif_priv;
+
+	camif_priv->mem_base    = soc_info->reg_map[VFE_CORE_BASE_IDX].mem_base;
+	camif_priv->camif_reg   = camif_info->camif_reg;
+	camif_priv->common_reg  = camif_info->common_reg;
+	camif_priv->reg_data    = camif_info->reg_data;
+	camif_priv->hw_intf     = hw_intf;
+	camif_priv->soc_info    = soc_info;
+
+	camif_node->init    = cam_vfe_camif_ver3_resource_init;
+	camif_node->deinit  = cam_vfe_camif_ver3_resource_deinit;
+	camif_node->start   = cam_vfe_camif_ver3_resource_start;
+	camif_node->stop    = cam_vfe_camif_ver3_resource_stop;
+	camif_node->process_cmd = cam_vfe_camif_ver3_process_cmd;
+	camif_node->top_half_handler = cam_vfe_camif_ver3_handle_irq_top_half;
+	camif_node->bottom_half_handler =
+		cam_vfe_camif_ver3_handle_irq_bottom_half;
+
+	return 0;
+}
+
+int cam_vfe_camif_ver3_deinit(
+	struct cam_isp_resource_node  *camif_node)
+{
+	struct cam_vfe_mux_camif_ver3_data *camif_priv;
+
+	if (!camif_node) {
+		CAM_ERR(CAM_ISP, "Error, camif_node is NULL %pK", camif_node);
+		return -ENODEV;
+	}
+
+	camif_priv = camif_node->res_priv;
+
+	camif_node->start = NULL;
+	camif_node->stop  = NULL;
+	camif_node->process_cmd = NULL;
+	camif_node->top_half_handler = NULL;
+	camif_node->bottom_half_handler = NULL;
+	camif_node->res_priv = NULL;
+
+	if (!camif_priv) {
+		CAM_ERR(CAM_ISP, "Error, camif_priv is NULL %pK", camif_priv);
+		return -ENODEV;
+	}
+
+	kfree(camif_priv);
+
+	return 0;
+}
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver3.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver3.h
new file mode 100644
index 0000000..221cbb2
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver3.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_VFE_CAMIF_VER3_H_
+#define _CAM_VFE_CAMIF_VER3_H_
+
+#include "cam_isp_hw.h"
+#include "cam_vfe_top.h"
+
+/*
+ * Debug values for camif module
+ */
+#define CAMIF_DEBUG_ENABLE_SENSOR_DIAG_STATUS      BIT(0)
+
+struct cam_vfe_camif_ver3_pp_clc_reg {
+	uint32_t     hw_version;
+	uint32_t     hw_status;
+	uint32_t     module_cfg;
+	uint32_t     pdaf_raw_crop_width_cfg;
+	uint32_t     pdaf_raw_crop_height_cfg;
+	uint32_t     line_skip_pattern;
+	uint32_t     pixel_skip_pattern;
+	uint32_t     period_cfg;
+	uint32_t     irq_subsample_pattern;
+	uint32_t     epoch_irq_cfg;
+	uint32_t     debug_1;
+	uint32_t     debug_0;
+	uint32_t     test_bus_ctrl;
+	uint32_t     spare;
+	uint32_t     reg_update_cmd;
+};
+
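+/*
+ * Bit positions and IRQ masks that vary per VFE version; populated by the
+ * per-target header (e.g. cam_vfe480.h) and consumed by the common logic.
+ */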
+struct cam_vfe_camif_ver3_reg_data {
+	uint32_t     pp_extern_reg_update_shift;
+	uint32_t     lcr_extern_reg_update_shift;
+	uint32_t     dual_pd_extern_reg_update_shift;
+	uint32_t     extern_reg_update_mask;
+
+	uint32_t     pixel_pattern_shift;
+	uint32_t     pixel_pattern_mask;
+
+	uint32_t     dsp_mode_shift;
+	uint32_t     dsp_mode_mask;
+	uint32_t     dsp_en_shift;
+	uint32_t     dsp_en_mask;
+
+	uint32_t     reg_update_cmd_data;
+	uint32_t     epoch_line_cfg;
+	uint32_t     sof_irq_mask;
+	uint32_t     epoch0_irq_mask;
+	uint32_t     epoch1_irq_mask;
+	uint32_t     reg_update_irq_mask;
+	uint32_t     eof_irq_mask;
+	uint32_t     error_irq_mask0;
+	uint32_t     error_irq_mask2;
+
+	uint32_t     enable_diagnostic_hw;
+	uint32_t     pp_camif_cfg_en_shift;
+	uint32_t     pp_camif_cfg_ife_out_en_shift;
+};
+
+struct cam_vfe_camif_ver3_hw_info {
+	struct cam_vfe_top_ver3_reg_offset_common   *common_reg;
+	struct cam_vfe_camif_ver3_pp_clc_reg        *camif_reg;
+	struct cam_vfe_camif_ver3_reg_data          *reg_data;
+};
+
+int cam_vfe_camif_ver3_acquire_resource(
+	struct cam_isp_resource_node  *camif_res,
+	void                          *acquire_param);
+
+int cam_vfe_camif_ver3_init(
+	struct cam_hw_intf            *hw_intf,
+	struct cam_hw_soc_info        *soc_info,
+	void                          *camif_hw_info,
+	struct cam_isp_resource_node  *camif_node);
+
+int cam_vfe_camif_ver3_deinit(
+	struct cam_isp_resource_node  *camif_node);
+
+#endif /* _CAM_VFE_CAMIF_VER3_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top.c
index f830ad9..287a10e 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top.c
@@ -1,10 +1,11 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
  */
 
 #include "cam_vfe_top.h"
 #include "cam_vfe_top_ver2.h"
+#include "cam_vfe_top_ver3.h"
 #include "cam_debug_util.h"
 
 int cam_vfe_top_init(uint32_t          top_version,
@@ -20,6 +21,10 @@ int cam_vfe_top_init(uint32_t          top_version,
 		rc = cam_vfe_top_ver2_init(soc_info, hw_intf, top_hw_info,
 			vfe_top);
 		break;
+	case CAM_VFE_TOP_VER_3_0:
+		rc = cam_vfe_top_ver3_init(soc_info, hw_intf, top_hw_info,
+			vfe_top);
+		break;
 	default:
 		CAM_ERR(CAM_ISP, "Error! Unsupported Version %x", top_version);
 		break;
@@ -37,6 +42,9 @@ int cam_vfe_top_deinit(uint32_t        top_version,
 	case CAM_VFE_TOP_VER_2_0:
 		rc = cam_vfe_top_ver2_deinit(vfe_top);
 		break;
+	case CAM_VFE_TOP_VER_3_0:
+		rc = cam_vfe_top_ver3_deinit(vfe_top);
+		break;
 	default:
 		CAM_ERR(CAM_ISP, "Error! Unsupported Version %x", top_version);
 		break;
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver3.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver3.c
new file mode 100644
index 0000000..0e7e8f6
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver3.c
@@ -0,0 +1,931 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/slab.h>
+#include "cam_io_util.h"
+#include "cam_cdm_util.h"
+#include "cam_vfe_hw_intf.h"
+#include "cam_vfe_top.h"
+#include "cam_vfe_top_ver3.h"
+#include "cam_debug_util.h"
+#include "cam_cpas_api.h"
+#include "cam_vfe_soc.h"
+
+#define CAM_VFE_HW_RESET_HW_AND_REG_VAL       0x00003F9F
+#define CAM_VFE_HW_RESET_HW_VAL               0x00003F87
+#define CAM_VFE_DELAY_BW_REDUCTION_NUM_FRAMES 3
+
+struct cam_vfe_top_ver3_common_data {
+	struct cam_hw_soc_info                     *soc_info;
+	struct cam_hw_intf                         *hw_intf;
+	struct cam_vfe_top_ver3_reg_offset_common  *common_reg;
+};
+
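+/*
+ * Top private data: one mux resource per input path plus the clock and
+ * AXI BW bookkeeping used to fold per-path requests into a single vote.
+ */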
+struct cam_vfe_top_ver3_priv {
+	struct cam_vfe_top_ver3_common_data common_data;
+	struct cam_isp_resource_node        mux_rsrc[CAM_VFE_TOP_VER3_MUX_MAX];
+	unsigned long                       hw_clk_rate;
+	struct cam_axi_vote                 applied_axi_vote;
+	struct cam_axi_vote             req_axi_vote[CAM_VFE_TOP_VER3_MUX_MAX];
+	unsigned long                   req_clk_rate[CAM_VFE_TOP_VER3_MUX_MAX];
+	struct cam_axi_vote             last_vote[CAM_CPAS_HANDLE_MAX]
+					[CAM_VFE_TOP_VER3_MUX_MAX *
+					CAM_VFE_DELAY_BW_REDUCTION_NUM_FRAMES];
+	uint32_t                        last_counter[CAM_CPAS_HANDLE_MAX];
+	enum cam_vfe_bw_control_action
+		axi_vote_control[CAM_VFE_TOP_VER3_MUX_MAX];
+	enum cam_cpas_handle_id cpashdl_type[CAM_VFE_TOP_VER3_MUX_MAX];
+};
+
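+/*
+ * Write a CDM CHANGE_BASE command so that subsequent register writes in
+ * the command buffer are relative to this VFE's register base.
+ */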
+static int cam_vfe_top_ver3_mux_get_base(struct cam_vfe_top_ver3_priv *top_priv,
+	void *cmd_args, uint32_t arg_size)
+{
+	uint32_t                          size = 0;
+	uint32_t                          mem_base = 0;
+	struct cam_isp_hw_get_cmd_update *cdm_args  = cmd_args;
+	struct cam_cdm_utils_ops         *cdm_util_ops = NULL;
+
+	if (arg_size != sizeof(struct cam_isp_hw_get_cmd_update)) {
+		CAM_ERR(CAM_ISP, "Error, Invalid cmd size");
+		return -EINVAL;
+	}
+
+	if (!cdm_args || !cdm_args->res || !top_priv ||
+		!top_priv->common_data.soc_info) {
+		CAM_ERR(CAM_ISP, "Error, Invalid args");
+		return -EINVAL;
+	}
+
+	cdm_util_ops =
+		(struct cam_cdm_utils_ops *)cdm_args->res->cdm_ops;
+
+	if (!cdm_util_ops) {
+		CAM_ERR(CAM_ISP, "Invalid CDM ops");
+		return -EINVAL;
+	}
+
+	size = cdm_util_ops->cdm_required_size_changebase();
+	/* since cdm returns dwords, we need to convert it into bytes */
+	if ((size * 4) > cdm_args->cmd.size) {
+		CAM_ERR(CAM_ISP, "buf size:%d is not sufficient, expected: %d",
+			cdm_args->cmd.size, (size * 4));
+		return -EINVAL;
+	}
+
+	mem_base = CAM_SOC_GET_REG_MAP_CAM_BASE(
+		top_priv->common_data.soc_info, VFE_CORE_BASE_IDX);
+	CAM_DBG(CAM_ISP, "core %d mem_base 0x%x",
+		top_priv->common_data.soc_info->index, mem_base);
+
+	cdm_util_ops->cdm_write_changebase(
+		cdm_args->cmd.cmd_buf_addr, mem_base);
+	cdm_args->cmd.used_bytes = (size * 4);
+
+	return 0;
+}
+
+static int cam_vfe_top_ver3_set_hw_clk_rate(
+	struct cam_vfe_top_ver3_priv *top_priv)
+{
+	struct cam_hw_soc_info        *soc_info = NULL;
+	int                            i, rc = 0;
+	unsigned long                  max_clk_rate = 0;
+
+	soc_info = top_priv->common_data.soc_info;
+
+	for (i = 0; i < CAM_VFE_TOP_VER3_MUX_MAX; i++) {
+		if (top_priv->req_clk_rate[i] > max_clk_rate)
+			max_clk_rate = top_priv->req_clk_rate[i];
+	}
+	if (max_clk_rate == top_priv->hw_clk_rate)
+		return 0;
+
+	CAM_DBG(CAM_ISP, "VFE: Clock name=%s idx=%d clk=%lu",
+		soc_info->clk_name[soc_info->src_clk_idx],
+		soc_info->src_clk_idx, max_clk_rate);
+
+	rc = cam_soc_util_set_src_clk_rate(soc_info, max_clk_rate);
+
+	if (!rc)
+		top_priv->hw_clk_rate = max_clk_rate;
+	else
+		CAM_ERR(CAM_ISP, "Set Clock rate failed, rc=%d", rc);
+
+	return rc;
+}
+
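+/*
+ * Aggregate per-mux BW requests per CPAS handle; start/stop votes apply
+ * immediately, otherwise the max over recent frames is voted.
+ */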
+static int cam_vfe_top_ver3_set_axi_bw_vote(
+	struct cam_vfe_top_ver3_priv *top_priv,
+	bool start_stop)
+{
+	struct cam_axi_vote sum = {0, 0};
+	struct cam_axi_vote to_be_applied_axi_vote = {0, 0};
+	int i, rc = 0;
+	struct cam_hw_soc_info   *soc_info =
+		top_priv->common_data.soc_info;
+	struct cam_vfe_soc_private *soc_private =
+		soc_info->soc_private;
+	bool apply_bw_update = false;
+	enum cam_cpas_handle_id cpashdl_type;
+	struct cam_axi_vote *last_vote = NULL;
+
+	if (!soc_private) {
+		CAM_ERR(CAM_ISP, "Error soc_private NULL");
+		return -EINVAL;
+	}
+
+	for (cpashdl_type = 0; cpashdl_type < CAM_CPAS_HANDLE_MAX;
+		cpashdl_type++) {
+
+		if ((soc_private->cpas_version != CAM_CPAS_TITAN_480_V100)
+			&& cpashdl_type)
+			continue;
+
+		sum.uncompressed_bw = sum.compressed_bw = 0;
+		to_be_applied_axi_vote.uncompressed_bw = 0;
+		to_be_applied_axi_vote.compressed_bw = 0;
+		apply_bw_update = false;
+
+		for (i = 0; i < CAM_VFE_TOP_VER3_MUX_MAX; i++) {
+			if (top_priv->axi_vote_control[i] ==
+				CAM_VFE_BW_CONTROL_INCLUDE &&
+				top_priv->cpashdl_type[i] ==
+				cpashdl_type) {
+				sum.uncompressed_bw +=
+				top_priv->req_axi_vote[i].uncompressed_bw;
+				sum.compressed_bw +=
+				top_priv->req_axi_vote[i].compressed_bw;
+			}
+		}
+
+		CAM_DBG(CAM_ISP, "Updating BW from (%llu %llu) to (%llu %llu)",
+			top_priv->applied_axi_vote.uncompressed_bw,
+			top_priv->applied_axi_vote.compressed_bw,
+			sum.uncompressed_bw,
+			sum.compressed_bw);
+
+		last_vote = top_priv->last_vote[cpashdl_type];
+
+		last_vote[top_priv->last_counter[cpashdl_type]] = sum;
+		top_priv->last_counter[cpashdl_type] =
+			(top_priv->last_counter[cpashdl_type] + 1) %
+			(CAM_VFE_TOP_VER3_MUX_MAX *
+			CAM_VFE_DELAY_BW_REDUCTION_NUM_FRAMES);
+
+		if ((top_priv->applied_axi_vote.uncompressed_bw ==
+			sum.uncompressed_bw) &&
+			(top_priv->applied_axi_vote.compressed_bw ==
+			sum.compressed_bw)) {
+			CAM_DBG(CAM_ISP, "BW config unchanged %llu %llu",
+				top_priv->applied_axi_vote.uncompressed_bw,
+				top_priv->applied_axi_vote.compressed_bw);
+			return 0;
+		}
+
+		if (start_stop) {
+			/* vote the current request immediately */
+			to_be_applied_axi_vote = sum;
+			rc = cam_cpas_update_axi_vote(
+				soc_private->cpas_handle[cpashdl_type],
+				&to_be_applied_axi_vote);
+			if (!rc) {
+				top_priv->applied_axi_vote.uncompressed_bw =
+					to_be_applied_axi_vote.uncompressed_bw;
+				top_priv->applied_axi_vote.compressed_bw =
+					to_be_applied_axi_vote.compressed_bw;
+			}
+			return rc;
+		}
+
+		/*
+		 * Find max bw request in last few frames. This is the bw
+		 * that we want to vote to CPAS now.
+		 */
+		for (i = 0; i < (CAM_VFE_TOP_VER3_MUX_MAX *
+			CAM_VFE_DELAY_BW_REDUCTION_NUM_FRAMES); i++) {
+			if (to_be_applied_axi_vote.compressed_bw <
+				last_vote[i].compressed_bw)
+				to_be_applied_axi_vote.compressed_bw =
+					last_vote[i].compressed_bw;
+
+			if (to_be_applied_axi_vote.uncompressed_bw <
+					last_vote[i].uncompressed_bw)
+				to_be_applied_axi_vote.uncompressed_bw =
+					last_vote[i].uncompressed_bw;
+		}
+
+		if ((to_be_applied_axi_vote.uncompressed_bw !=
+			top_priv->applied_axi_vote.uncompressed_bw) ||
+			(to_be_applied_axi_vote.compressed_bw !=
+			top_priv->applied_axi_vote.compressed_bw))
+			apply_bw_update = true;
+
+		CAM_DBG(CAM_ISP, "apply_bw_update=%d", apply_bw_update);
+
+		if (apply_bw_update) {
+			rc = cam_cpas_update_axi_vote(
+				soc_private->cpas_handle[cpashdl_type],
+				&to_be_applied_axi_vote);
+			if (!rc) {
+				top_priv->applied_axi_vote.uncompressed_bw =
+				to_be_applied_axi_vote.uncompressed_bw;
+				top_priv->applied_axi_vote.compressed_bw =
+					to_be_applied_axi_vote.compressed_bw;
+			} else {
+				CAM_ERR(CAM_ISP, "BW request failed, rc=%d",
+					rc);
+			}
+		}
+	}
+	return rc;
+}
+
+static int cam_vfe_top_fs_update(
+	struct cam_vfe_top_ver3_priv *top_priv,
+	void *cmd_args, uint32_t arg_size)
+{
+	struct cam_vfe_fe_update_args *cmd_update = cmd_args;
+
+	if (cmd_update->node_res->process_cmd)
+		return cmd_update->node_res->process_cmd(cmd_update->node_res,
+			CAM_ISP_HW_CMD_FE_UPDATE_IN_RD, cmd_args, arg_size);
+
+	return 0;
+}
+
+static int cam_vfe_top_ver3_clock_update(
+	struct cam_vfe_top_ver3_priv *top_priv,
+	void *cmd_args, uint32_t arg_size)
+{
+	struct cam_vfe_clock_update_args     *clk_update = NULL;
+	struct cam_isp_resource_node         *res = NULL;
+	struct cam_hw_info                   *hw_info = NULL;
+	int                                   i, rc = 0;
+
+	clk_update =
+		(struct cam_vfe_clock_update_args *)cmd_args;
+	res = clk_update->node_res;
+
+	if (!res || !res->hw_intf->hw_priv) {
+		CAM_ERR(CAM_ISP, "Invalid input res %pK", res);
+		return -EINVAL;
+	}
+
+	hw_info = res->hw_intf->hw_priv;
+
+	if (res->res_type != CAM_ISP_RESOURCE_VFE_IN ||
+		res->res_id >= CAM_ISP_HW_VFE_IN_MAX) {
+		CAM_ERR(CAM_ISP, "VFE:%d Invalid res_type:%d res_id:%d",
+			res->hw_intf->hw_idx, res->res_type,
+			res->res_id);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < CAM_VFE_TOP_VER3_MUX_MAX; i++) {
+		if (top_priv->mux_rsrc[i].res_id == res->res_id) {
+			top_priv->req_clk_rate[i] = clk_update->clk_rate;
+			break;
+		}
+	}
+
+	if (hw_info->hw_state != CAM_HW_STATE_POWER_UP) {
+		CAM_DBG(CAM_ISP,
+			"VFE:%d Not ready to set clocks yet :%d",
+			res->hw_intf->hw_idx,
+			hw_info->hw_state);
+	} else {
+		rc = cam_vfe_top_ver3_set_hw_clk_rate(top_priv);
+	}
+
+	return rc;
+}
+
+static int cam_vfe_top_ver3_bw_update(
+	struct cam_vfe_top_ver3_priv *top_priv,
+	void *cmd_args, uint32_t arg_size)
+{
+	struct cam_vfe_bw_update_args        *bw_update = NULL;
+	struct cam_isp_resource_node         *res = NULL;
+	struct cam_hw_info                   *hw_info = NULL;
+	int                                   rc = 0;
+	int                                   i;
+
+	bw_update = (struct cam_vfe_bw_update_args *)cmd_args;
+	res = bw_update->node_res;
+
+	if (!res || !res->hw_intf->hw_priv)
+		return -EINVAL;
+
+	hw_info = res->hw_intf->hw_priv;
+
+	if (res->res_type != CAM_ISP_RESOURCE_VFE_IN ||
+		res->res_id >= CAM_ISP_HW_VFE_IN_MAX) {
+		CAM_ERR(CAM_ISP, "VFE:%d Invalid res_type:%d res_id:%d",
+			res->hw_intf->hw_idx, res->res_type,
+			res->res_id);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < CAM_VFE_TOP_VER3_MUX_MAX; i++) {
+		if (top_priv->mux_rsrc[i].res_id == res->res_id) {
+			top_priv->req_axi_vote[i].uncompressed_bw =
+				bw_update->camnoc_bw_bytes;
+			top_priv->req_axi_vote[i].compressed_bw =
+				bw_update->external_bw_bytes;
+			top_priv->axi_vote_control[i] =
+				CAM_VFE_BW_CONTROL_INCLUDE;
+			break;
+		}
+	}
+
+	if (hw_info->hw_state != CAM_HW_STATE_POWER_UP) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP,
+			"VFE:%d Not ready to set BW yet :%d",
+			res->hw_intf->hw_idx,
+			hw_info->hw_state);
+	} else {
+		rc = cam_vfe_top_ver3_set_axi_bw_vote(top_priv, false);
+	}
+
+	return rc;
+}
+
+static int cam_vfe_top_ver3_bw_control(
+	struct cam_vfe_top_ver3_priv *top_priv,
+	void *cmd_args, uint32_t arg_size)
+{
+	struct cam_vfe_bw_control_args       *bw_ctrl = NULL;
+	struct cam_isp_resource_node         *res = NULL;
+	struct cam_hw_info                   *hw_info = NULL;
+	int                                   rc = 0;
+	int                                   i;
+
+	bw_ctrl = (struct cam_vfe_bw_control_args *)cmd_args;
+	res = bw_ctrl->node_res;
+
+	if (!res || !res->hw_intf->hw_priv)
+		return -EINVAL;
+
+	hw_info = res->hw_intf->hw_priv;
+
+	if (res->res_type != CAM_ISP_RESOURCE_VFE_IN ||
+		res->res_id >= CAM_ISP_HW_VFE_IN_MAX) {
+		CAM_ERR(CAM_ISP, "VFE:%d Invalid res_type:%d res_id:%d",
+			res->hw_intf->hw_idx, res->res_type,
+			res->res_id);
+		return -EINVAL;
+	}
+
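+	/* Include or exclude this input's BW vote per the requested action */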
+	for (i = 0; i < CAM_VFE_TOP_VER3_MUX_MAX; i++) {
+		if (top_priv->mux_rsrc[i].res_id == res->res_id) {
+			top_priv->axi_vote_control[i] = bw_ctrl->action;
+			break;
+		}
+	}
+
+	if (hw_info->hw_state != CAM_HW_STATE_POWER_UP) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP,
+			"VFE:%d Not ready to set BW yet, hw_state:%d",
+			res->hw_intf->hw_idx,
+			hw_info->hw_state);
+	} else {
+		rc = cam_vfe_top_ver3_set_axi_bw_vote(top_priv, true);
+	}
+
+	return rc;
+}
+
+static int cam_vfe_top_ver3_mux_get_reg_update(
+	struct cam_vfe_top_ver3_priv *top_priv,
+	void *cmd_args, uint32_t arg_size)
+{
+	struct cam_isp_hw_get_cmd_update  *cmd_update = cmd_args;
+
+	if (cmd_update->res->process_cmd)
+		return cmd_update->res->process_cmd(cmd_update->res,
+			CAM_ISP_HW_CMD_GET_REG_UPDATE, cmd_args, arg_size);
+
+	return -EINVAL;
+}
+
+int cam_vfe_top_ver3_get_hw_caps(void *device_priv,
+	void *get_hw_cap_args, uint32_t arg_size)
+{
+	return -EPERM;
+}
+
+int cam_vfe_top_ver3_init_hw(void *device_priv,
+	void *init_hw_args, uint32_t arg_size)
+{
+	struct cam_vfe_top_ver3_priv   *top_priv = device_priv;
+
+	top_priv->hw_clk_rate = 0;
+
+	return 0;
+}
+
+int cam_vfe_top_ver3_reset(void *device_priv,
+	void *reset_core_args, uint32_t arg_size)
+{
+	struct cam_vfe_top_ver3_priv   *top_priv = device_priv;
+	struct cam_hw_soc_info         *soc_info = NULL;
+	struct cam_vfe_top_ver3_reg_offset_common *reg_common = NULL;
+	uint32_t *reset_reg_args = reset_core_args;
+	uint32_t reset_reg_val;
+
+	if (!top_priv || !reset_reg_args) {
+		CAM_ERR(CAM_ISP, "Invalid arguments");
+		return -EINVAL;
+	}
+
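+	/* Select the reset scope: HW and registers, or HW only (default) */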
+	switch (*reset_reg_args) {
+	case CAM_VFE_HW_RESET_HW_AND_REG:
+		reset_reg_val = CAM_VFE_HW_RESET_HW_AND_REG_VAL;
+		break;
+	default:
+		reset_reg_val = CAM_VFE_HW_RESET_HW_VAL;
+		break;
+	}
+
+	CAM_DBG(CAM_ISP, "reset reg value: %x", reset_reg_val);
+	soc_info = top_priv->common_data.soc_info;
+	reg_common = top_priv->common_data.common_reg;
+
+	/* Mask all the IRQs except the RESET IRQ (bit 31); the write below
+	 * programs the IRQ mask register at offset 0x5C.
+	 */
+	cam_io_w_mb((1 << 31),
+		CAM_SOC_GET_REG_MAP_START(soc_info, VFE_CORE_BASE_IDX) + 0x5C);
+
+	/* Reset HW */
+	cam_io_w_mb(reset_reg_val,
+		CAM_SOC_GET_REG_MAP_START(soc_info, VFE_CORE_BASE_IDX) +
+		reg_common->global_reset_cmd);
+
+	CAM_DBG(CAM_ISP, "Reset HW exit");
+	return 0;
+}
+
+int cam_vfe_top_ver3_reserve(void *device_priv,
+	void *reserve_args, uint32_t arg_size)
+{
+	struct cam_vfe_top_ver3_priv            *top_priv;
+	struct cam_vfe_acquire_args             *args;
+	struct cam_vfe_hw_vfe_in_acquire_args   *acquire_args;
+	uint32_t i;
+	int rc = -EINVAL;
+
+	if (!device_priv || !reserve_args) {
+		CAM_ERR(CAM_ISP, "Error, Invalid input arguments");
+		return -EINVAL;
+	}
+
+	top_priv = (struct cam_vfe_top_ver3_priv   *)device_priv;
+	args = (struct cam_vfe_acquire_args *)reserve_args;
+	acquire_args = &args->vfe_in;
+
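+	/*
+	 * Claim the first available mux input matching res_id; CAMIF,
+	 * CAMIF_LITE and RD inputs also need a module-specific acquire.
+	 */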
+	for (i = 0; i < CAM_VFE_TOP_VER3_MUX_MAX; i++) {
+		if (top_priv->mux_rsrc[i].res_id ==  acquire_args->res_id &&
+			top_priv->mux_rsrc[i].res_state ==
+			CAM_ISP_RESOURCE_STATE_AVAILABLE) {
+
+			if (acquire_args->res_id == CAM_ISP_HW_VFE_IN_CAMIF) {
+				rc = cam_vfe_camif_ver3_acquire_resource(
+					&top_priv->mux_rsrc[i],
+					args);
+				if (rc)
+					break;
+			}
+
+			if (acquire_args->res_id ==
+				CAM_ISP_HW_VFE_IN_CAMIF_LITE) {
+				rc = cam_vfe_camif_lite_ver2_acquire_resource(
+					&top_priv->mux_rsrc[i],
+					args);
+				if (rc)
+					break;
+			}
+
+			if (acquire_args->res_id == CAM_ISP_HW_VFE_IN_RD) {
+				rc = cam_vfe_fe_ver1_acquire_resource(
+					&top_priv->mux_rsrc[i],
+					args);
+				if (rc)
+					break;
+			}
+
+			top_priv->mux_rsrc[i].cdm_ops = acquire_args->cdm_ops;
+			top_priv->mux_rsrc[i].tasklet_info = args->tasklet;
+			top_priv->mux_rsrc[i].res_state =
+				CAM_ISP_RESOURCE_STATE_RESERVED;
+			acquire_args->rsrc_node =
+				&top_priv->mux_rsrc[i];
+
+			rc = 0;
+			break;
+		}
+	}
+
+	return rc;
+}
+
+int cam_vfe_top_ver3_release(void *device_priv,
+	void *release_args, uint32_t arg_size)
+{
+	struct cam_vfe_top_ver3_priv            *top_priv;
+	struct cam_isp_resource_node            *mux_res;
+
+	if (!device_priv || !release_args) {
+		CAM_ERR(CAM_ISP, "Error, Invalid input arguments");
+		return -EINVAL;
+	}
+
+	top_priv = (struct cam_vfe_top_ver3_priv   *)device_priv;
+	mux_res = (struct cam_isp_resource_node *)release_args;
+
+	CAM_DBG(CAM_ISP, "Resource in state %d", mux_res->res_state);
+	if (mux_res->res_state < CAM_ISP_RESOURCE_STATE_RESERVED) {
+		CAM_ERR(CAM_ISP, "Error, Resource in Invalid res_state :%d",
+			mux_res->res_state);
+		return -EINVAL;
+	}
+	mux_res->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE;
+
+	return 0;
+}
+
+int cam_vfe_top_ver3_start(void *device_priv,
+	void *start_args, uint32_t arg_size)
+{
+	struct cam_vfe_top_ver3_priv            *top_priv;
+	struct cam_isp_resource_node            *mux_res;
+	struct cam_hw_info                      *hw_info = NULL;
+	int rc = 0;
+
+	if (!device_priv || !start_args) {
+		CAM_ERR(CAM_ISP, "Error, Invalid input arguments");
+		return -EINVAL;
+	}
+
+	top_priv = (struct cam_vfe_top_ver3_priv *)device_priv;
+	mux_res = (struct cam_isp_resource_node *)start_args;
+	hw_info = (struct cam_hw_info  *)mux_res->hw_intf->hw_priv;
+
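+	/* Clock rate and BW votes can only be applied on powered-up HW */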
+	if (hw_info->hw_state == CAM_HW_STATE_POWER_UP) {
+		rc = cam_vfe_top_ver3_set_hw_clk_rate(top_priv);
+		if (rc) {
+			CAM_ERR(CAM_ISP,
+				"set_hw_clk_rate failed, rc=%d", rc);
+			return rc;
+		}
+
+		rc = cam_vfe_top_ver3_set_axi_bw_vote(top_priv, true);
+		if (rc) {
+			CAM_ERR(CAM_ISP,
+				"set_axi_bw_vote failed, rc=%d", rc);
+			return rc;
+		}
+
+		if (mux_res->start) {
+			rc = mux_res->start(mux_res);
+		} else {
+			CAM_ERR(CAM_ISP,
+				"Invalid res id:%d", mux_res->res_id);
+			rc = -EINVAL;
+		}
+	} else {
+		CAM_ERR(CAM_ISP, "VFE HW not powered up");
+		rc = -EPERM;
+	}
+
+	return rc;
+}
+
+int cam_vfe_top_ver3_stop(void *device_priv,
+	void *stop_args, uint32_t arg_size)
+{
+	struct cam_vfe_top_ver3_priv            *top_priv;
+	struct cam_isp_resource_node            *mux_res;
+	struct cam_hw_info                      *hw_info = NULL;
+	int i, rc = 0;
+
+	if (!device_priv || !stop_args) {
+		CAM_ERR(CAM_ISP, "Error, Invalid input arguments");
+		return -EINVAL;
+	}
+
+	top_priv = (struct cam_vfe_top_ver3_priv   *)device_priv;
+	mux_res = (struct cam_isp_resource_node *)stop_args;
+	hw_info = (struct cam_hw_info  *)mux_res->hw_intf->hw_priv;
+
+	if ((mux_res->res_id == CAM_ISP_HW_VFE_IN_CAMIF) ||
+		(mux_res->res_id == CAM_ISP_HW_VFE_IN_CAMIF_LITE) ||
+		(mux_res->res_id == CAM_ISP_HW_VFE_IN_RD) ||
+		((mux_res->res_id >= CAM_ISP_HW_VFE_IN_RDI0) &&
+		(mux_res->res_id <= CAM_ISP_HW_VFE_IN_RDI3))) {
+		rc = mux_res->stop(mux_res);
+	} else {
+		CAM_ERR(CAM_ISP, "Invalid res id:%d", mux_res->res_id);
+		return -EINVAL;
+	}
+
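+	/*
+	 * On a successful stop, zero this input's clock and BW requests so
+	 * they no longer contribute to the aggregated votes.
+	 */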
+	if (!rc) {
+		for (i = 0; i < CAM_VFE_TOP_VER3_MUX_MAX; i++) {
+			if (top_priv->mux_rsrc[i].res_id == mux_res->res_id) {
+				top_priv->req_clk_rate[i] = 0;
+				top_priv->req_axi_vote[i].compressed_bw = 0;
+				top_priv->req_axi_vote[i].uncompressed_bw = 0;
+				top_priv->axi_vote_control[i] =
+					CAM_VFE_BW_CONTROL_EXCLUDE;
+				break;
+			}
+		}
+	}
+
+	return rc;
+}
+
+int cam_vfe_top_ver3_read(void *device_priv,
+	void *read_args, uint32_t arg_size)
+{
+	return -EPERM;
+}
+
+int cam_vfe_top_ver3_write(void *device_priv,
+	void *write_args, uint32_t arg_size)
+{
+	return -EPERM;
+}
+
+int cam_vfe_top_ver3_process_cmd(void *device_priv, uint32_t cmd_type,
+	void *cmd_args, uint32_t arg_size)
+{
+	int rc = 0;
+	struct cam_vfe_top_ver3_priv            *top_priv;
+
+	if (!device_priv || !cmd_args) {
+		CAM_ERR(CAM_ISP, "Error, Invalid arguments");
+		return -EINVAL;
+	}
+	top_priv = (struct cam_vfe_top_ver3_priv *)device_priv;
+
+	switch (cmd_type) {
+	case CAM_ISP_HW_CMD_GET_CHANGE_BASE:
+		rc = cam_vfe_top_ver3_mux_get_base(top_priv,
+			cmd_args, arg_size);
+		break;
+	case CAM_ISP_HW_CMD_GET_REG_UPDATE:
+		rc = cam_vfe_top_ver3_mux_get_reg_update(top_priv, cmd_args,
+			arg_size);
+		break;
+	case CAM_ISP_HW_CMD_CLOCK_UPDATE:
+		rc = cam_vfe_top_ver3_clock_update(top_priv, cmd_args,
+			arg_size);
+		break;
+	case CAM_ISP_HW_CMD_FE_UPDATE_IN_RD:
+		rc = cam_vfe_top_fs_update(top_priv, cmd_args,
+			arg_size);
+		break;
+	case CAM_ISP_HW_CMD_BW_UPDATE:
+		rc = cam_vfe_top_ver3_bw_update(top_priv, cmd_args,
+			arg_size);
+		break;
+	case CAM_ISP_HW_CMD_BW_CONTROL:
+		rc = cam_vfe_top_ver3_bw_control(top_priv, cmd_args, arg_size);
+		break;
+	default:
+		rc = -EINVAL;
+		CAM_ERR(CAM_ISP, "Error, Invalid cmd:%d", cmd_type);
+		break;
+	}
+
+	return rc;
+}
+
+int cam_vfe_top_ver3_init(
+	struct cam_hw_soc_info                 *soc_info,
+	struct cam_hw_intf                     *hw_intf,
+	void                                   *top_hw_info,
+	struct cam_vfe_top                    **vfe_top_ptr)
+{
+	int i, j, rc = 0;
+	struct cam_vfe_top_ver3_priv           *top_priv = NULL;
+	struct cam_vfe_top_ver3_hw_info        *ver3_hw_info = top_hw_info;
+	struct cam_vfe_top                     *vfe_top;
+	struct cam_vfe_soc_private             *soc_private = NULL;
+
+	vfe_top = kzalloc(sizeof(struct cam_vfe_top), GFP_KERNEL);
+	if (!vfe_top) {
+		CAM_ERR(CAM_ISP, "Failed to alloc for vfe_top");
+		rc = -ENOMEM;
+		goto end;
+	}
+
+	top_priv = kzalloc(sizeof(struct cam_vfe_top_ver3_priv),
+		GFP_KERNEL);
+	if (!top_priv) {
+		CAM_ERR(CAM_ISP, "Failed to alloc for vfe_top_priv");
+		rc = -ENOMEM;
+		goto free_vfe_top;
+	}
+
+	soc_private = soc_info->soc_private;
+	if (!soc_private) {
+		CAM_ERR(CAM_ISP, "Error, soc_private NULL");
+		rc = -ENODEV;
+		goto free_vfe_top_priv;
+	}
+	vfe_top->top_priv = top_priv;
+	top_priv->hw_clk_rate = 0;
+	top_priv->applied_axi_vote.compressed_bw = 0;
+	top_priv->applied_axi_vote.uncompressed_bw = 0;
+	memset(top_priv->last_vote, 0x0, sizeof(struct cam_axi_vote) *
+		(CAM_VFE_TOP_VER3_MUX_MAX * CAM_CPAS_HANDLE_MAX *
+		CAM_VFE_DELAY_BW_REDUCTION_NUM_FRAMES));
+	top_priv->last_counter[0] = 0;
+	top_priv->last_counter[1] = 0;
+
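+	/* Create one VFE_IN resource per mux input, keyed by its HW type */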
+	for (i = 0, j = 0; i < CAM_VFE_TOP_VER3_MUX_MAX; i++) {
+		top_priv->mux_rsrc[i].res_type = CAM_ISP_RESOURCE_VFE_IN;
+		top_priv->mux_rsrc[i].hw_intf = hw_intf;
+		top_priv->mux_rsrc[i].res_state =
+			CAM_ISP_RESOURCE_STATE_AVAILABLE;
+		top_priv->req_clk_rate[i] = 0;
+		top_priv->req_axi_vote[i].compressed_bw = 0;
+		top_priv->req_axi_vote[i].uncompressed_bw = 0;
+		top_priv->axi_vote_control[i] = CAM_VFE_BW_CONTROL_EXCLUDE;
+
+		if (ver3_hw_info->mux_type[i] == CAM_VFE_CAMIF_VER_3_0) {
+			top_priv->mux_rsrc[i].res_id =
+				CAM_ISP_HW_VFE_IN_CAMIF;
+			top_priv->cpashdl_type[i] = CAM_CPAS_HANDLE_CAMIF;
+
+			rc = cam_vfe_camif_ver3_init(hw_intf, soc_info,
+				&ver3_hw_info->camif_hw_info,
+				&top_priv->mux_rsrc[i]);
+			if (rc)
+				goto deinit_resources;
+		} else if (ver3_hw_info->mux_type[i] ==
+			CAM_VFE_RDI_VER_1_0) {
+			/* set the RDI resource id */
+			top_priv->mux_rsrc[i].res_id =
+				CAM_ISP_HW_VFE_IN_RDI0 + j++;
+			if (soc_private->cpas_version ==
+				CAM_CPAS_TITAN_175_V120)
+				top_priv->cpashdl_type[i] =
+					CAM_CPAS_HANDLE_RAW;
+			else
+				top_priv->cpashdl_type[i] =
+					CAM_CPAS_HANDLE_CAMIF;
+
+			rc = cam_vfe_rdi_ver2_init(hw_intf, soc_info,
+				&ver3_hw_info->rdi_hw_info,
+				&top_priv->mux_rsrc[i]);
+			if (rc)
+				goto deinit_resources;
+		} else if (ver3_hw_info->mux_type[i] ==
+			CAM_VFE_IN_RD_VER_1_0) {
+			/* set the RD resource id */
+			top_priv->mux_rsrc[i].res_id =
+				CAM_ISP_HW_VFE_IN_RD;
+
+			rc = cam_vfe_fe_ver1_init(hw_intf, soc_info,
+				&ver3_hw_info->fe_hw_info,
+				&top_priv->mux_rsrc[i]);
+			if (rc)
+				goto deinit_resources;
+		} else {
+			CAM_WARN(CAM_ISP, "Invalid mux type: %u",
+				ver3_hw_info->mux_type[i]);
+		}
+	}
+
+	vfe_top->hw_ops.get_hw_caps = cam_vfe_top_ver3_get_hw_caps;
+	vfe_top->hw_ops.init        = cam_vfe_top_ver3_init_hw;
+	vfe_top->hw_ops.reset       = cam_vfe_top_ver3_reset;
+	vfe_top->hw_ops.reserve     = cam_vfe_top_ver3_reserve;
+	vfe_top->hw_ops.release     = cam_vfe_top_ver3_release;
+	vfe_top->hw_ops.start       = cam_vfe_top_ver3_start;
+	vfe_top->hw_ops.stop        = cam_vfe_top_ver3_stop;
+	vfe_top->hw_ops.read        = cam_vfe_top_ver3_read;
+	vfe_top->hw_ops.write       = cam_vfe_top_ver3_write;
+	vfe_top->hw_ops.process_cmd = cam_vfe_top_ver3_process_cmd;
+	*vfe_top_ptr = vfe_top;
+
+	top_priv->common_data.soc_info     = soc_info;
+	top_priv->common_data.hw_intf      = hw_intf;
+	top_priv->common_data.common_reg   = ver3_hw_info->common_reg;
+
+	return rc;
+
+deinit_resources:
+	for (--i; i >= 0; i--) {
+		if (ver3_hw_info->mux_type[i] == CAM_VFE_CAMIF_VER_3_0) {
+			if (cam_vfe_camif_ver3_deinit(&top_priv->mux_rsrc[i]))
+				CAM_ERR(CAM_ISP, "Camif Deinit failed");
+		} else if (ver3_hw_info->mux_type[i] ==
+			CAM_VFE_CAMIF_LITE_VER_2_0) {
+			if (cam_vfe_camif_lite_ver2_deinit(
+				&top_priv->mux_rsrc[i]))
+				CAM_ERR(CAM_ISP, "Camif lite deinit failed");
+		} else if (ver3_hw_info->mux_type[i] ==
+			CAM_VFE_IN_RD_VER_1_0) {
+			if (cam_vfe_fe_ver1_deinit(&top_priv->mux_rsrc[i]))
+				CAM_ERR(CAM_ISP, "FE deinit failed");
+		} else {
+			if (cam_vfe_rdi_ver2_deinit(&top_priv->mux_rsrc[i]))
+				CAM_ERR(CAM_ISP, "RDI deinit failed");
+		}
+		top_priv->mux_rsrc[i].res_state =
+			CAM_ISP_RESOURCE_STATE_UNAVAILABLE;
+	}
+free_vfe_top_priv:
+	kfree(vfe_top->top_priv);
+free_vfe_top:
+	kfree(vfe_top);
+end:
+	return rc;
+}
+
+int cam_vfe_top_ver3_deinit(struct cam_vfe_top  **vfe_top_ptr)
+{
+	int i, rc = 0;
+	struct cam_vfe_top_ver3_priv           *top_priv = NULL;
+	struct cam_vfe_top                     *vfe_top;
+
+	if (!vfe_top_ptr) {
+		CAM_ERR(CAM_ISP, "Error, Invalid input");
+		return -EINVAL;
+	}
+
+	vfe_top = *vfe_top_ptr;
+	if (!vfe_top) {
+		CAM_ERR(CAM_ISP, "Error, vfe_top NULL");
+		return -ENODEV;
+	}
+
+	top_priv = vfe_top->top_priv;
+	if (!top_priv) {
+		CAM_ERR(CAM_ISP, "Error, vfe_top_priv NULL");
+		rc = -ENODEV;
+		goto free_vfe_top;
+	}
+
+	for (i = 0; i < CAM_VFE_TOP_VER3_MUX_MAX; i++) {
+		top_priv->mux_rsrc[i].res_state =
+			CAM_ISP_RESOURCE_STATE_UNAVAILABLE;
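+		/* res_type here is always CAM_ISP_RESOURCE_VFE_IN, so
+		 * dispatch each deinit by the mux resource id.
+		 */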
+		if (top_priv->mux_rsrc[i].res_id ==
+			CAM_ISP_HW_VFE_IN_CAMIF) {
+			rc = cam_vfe_camif_ver3_deinit(&top_priv->mux_rsrc[i]);
+			if (rc)
+				CAM_ERR(CAM_ISP, "Camif deinit failed rc=%d",
+					rc);
+		} else if (top_priv->mux_rsrc[i].res_id ==
+			CAM_ISP_HW_VFE_IN_CAMIF_LITE) {
+			rc = cam_vfe_camif_lite_ver2_deinit(
+				&top_priv->mux_rsrc[i]);
+			if (rc)
+				CAM_ERR(CAM_ISP,
+					"Camif lite deinit failed rc=%d", rc);
+		} else if ((top_priv->mux_rsrc[i].res_id >=
+			CAM_ISP_HW_VFE_IN_RDI0) &&
+			(top_priv->mux_rsrc[i].res_id <=
+			CAM_ISP_HW_VFE_IN_RDI3)) {
+			rc = cam_vfe_rdi_ver2_deinit(&top_priv->mux_rsrc[i]);
+			if (rc)
+				CAM_ERR(CAM_ISP, "RDI deinit failed rc=%d", rc);
+		} else if (top_priv->mux_rsrc[i].res_id ==
+			CAM_ISP_HW_VFE_IN_RD) {
+			rc = cam_vfe_fe_ver1_deinit(&top_priv->mux_rsrc[i]);
+			if (rc)
+				CAM_ERR(CAM_ISP, "FE deinit failed rc=%d",
+					rc);
+		}
+	}
+
+	kfree(vfe_top->top_priv);
+
+free_vfe_top:
+	kfree(vfe_top);
+	*vfe_top_ptr = NULL;
+
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver3.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver3.h
index 7d7d843..8c8d595 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver3.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver3.h
@@ -6,9 +6,12 @@
 #ifndef _CAM_VFE_TOP_VER3_H_
 #define _CAM_VFE_TOP_VER3_H_
 
+#include "cam_vfe_camif_ver3.h"
+#include "cam_vfe_camif_lite_ver3.h"
 #include "cam_vfe_camif_ver2.h"
 #include "cam_vfe_camif_lite_ver2.h"
 #include "cam_vfe_rdi.h"
+#include "cam_vfe_fe_ver1.h"
 
 #define CAM_VFE_TOP_VER3_MUX_MAX     6
 
@@ -40,9 +43,12 @@ struct cam_vfe_top_ver3_reg_offset_common {
 
 struct cam_vfe_top_ver3_hw_info {
 	struct cam_vfe_top_ver3_reg_offset_common  *common_reg;
-	struct cam_vfe_camif_ver2_hw_info           camif_hw_info;
-	struct cam_vfe_camif_lite_ver2_hw_info      camif_lite_hw_info;
-	struct cam_vfe_rdi_ver2_hw_info             rdi_hw_info;
+	struct cam_vfe_camif_ver3_hw_info           camif_hw_info;
+	struct cam_vfe_camif_lite_ver3_hw_info      pdlib_hw_info;
+	struct cam_vfe_camif_lite_ver3_hw_info
+		*rdi_hw_info[CAM_VFE_RDI_VER2_MAX];
+	struct cam_vfe_camif_lite_ver3_hw_info      lcr_hw_info;
+	struct cam_vfe_fe_ver1_hw_info              fe_hw_info;
 	uint32_t mux_type[CAM_VFE_TOP_VER3_MUX_MAX];
 };
 
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/include/cam_vfe_top.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/include/cam_vfe_top.h
index 1a968cd..8e9a83e 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/include/cam_vfe_top.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/include/cam_vfe_top.h
@@ -15,6 +15,7 @@
 
 #define CAM_VFE_CAMIF_VER_1_0 0x10
 #define CAM_VFE_CAMIF_VER_2_0 0x20
+#define CAM_VFE_CAMIF_VER_3_0 0x30
 
 #define CAM_VFE_CAMIF_LITE_VER_2_0 0x02
 
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
index 22dd601..a4eff25 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
@@ -492,8 +492,6 @@ static int __cam_req_mgr_send_req(struct cam_req_mgr_core_link *link,
 				continue;
 			}
 
-			trace_cam_req_mgr_apply_request(link, &apply_req, dev);
-
 			apply_req.trigger_point = trigger;
 			CAM_DBG(CAM_REQ,
 				"SEND: link_hdl: %x pd %d req_id %lld",
@@ -506,6 +504,7 @@ static int __cam_req_mgr_send_req(struct cam_req_mgr_core_link *link,
 				if (pd == link->max_delay)
 					link->open_req_cnt--;
 			}
+			trace_cam_req_mgr_apply_request(link, &apply_req, dev);
 		}
 	}
 	if (rc < 0) {
@@ -637,7 +636,7 @@ static int __cam_req_mgr_check_sync_for_mslave(
 {
 	struct cam_req_mgr_core_link *sync_link = NULL;
 	struct cam_req_mgr_slot      *sync_slot = NULL;
-	int sync_slot_idx = 0, prev_idx, next_idx, rd_idx, rc = 0;
+	int sync_slot_idx = 0, prev_idx, next_idx, rd_idx, sync_rd_idx, rc = 0;
 	int64_t req_id = 0, sync_req_id = 0;
 
 	if (!link->sync_link) {
@@ -647,6 +646,7 @@ static int __cam_req_mgr_check_sync_for_mslave(
 
 	sync_link = link->sync_link;
 	req_id = slot->req_id;
+	sync_rd_idx = sync_link->req.in_q->rd_idx;
 
 	CAM_DBG(CAM_CRM,
 		"link_hdl %x req %lld frame_skip_flag %d open_req_cnt:%d initial_sync_req [%lld,%lld] is_master:%d",
@@ -663,6 +663,19 @@ static int __cam_req_mgr_check_sync_for_mslave(
 		return -EAGAIN;
 	}
 
+	if (link->in_msync_mode &&
+		sync_link->in_msync_mode &&
+		(req_id - sync_link->req.in_q->slot[sync_rd_idx].req_id >
+		link->max_delay - sync_link->max_delay)) {
+		CAM_DBG(CAM_CRM,
+			"Req: %lld on link:%x needs to hold for link:%x req:%lld",
+			req_id,
+			link->link_hdl,
+			sync_link->link_hdl,
+			sync_link->req.in_q->slot[sync_rd_idx].req_id);
+		return -EINVAL;
+	}
+
 	if (link->is_master) {
 		rc = __cam_req_mgr_inject_delay(link->req.l_tbl, slot->idx);
 		if (rc) {
@@ -1001,8 +1014,10 @@ static int __cam_req_mgr_process_req(struct cam_req_mgr_core_link *link,
 					link->link_hdl);
 				link->in_msync_mode = false;
 				link->initial_sync_req = -1;
-				if (link->sync_link)
+				if (link->sync_link) {
 					link->sync_link->initial_sync_req = -1;
+					link->sync_link->in_msync_mode = false;
+				}
 			}
 
 			rc = __cam_req_mgr_inject_delay(link->req.l_tbl,
@@ -1755,6 +1770,8 @@ int cam_req_mgr_process_sched_req(void *priv, void *data)
 			link->initial_sync_req = slot->req_id;
 	} else {
 		link->initial_sync_req = -1;
+		if (link->sync_link)
+			link->sync_link->initial_sync_req = -1;
 	}
 
 	mutex_unlock(&link->req.lock);
diff --git a/drivers/media/platform/msm/camera/cam_sync/cam_sync.c b/drivers/media/platform/msm/camera/cam_sync/cam_sync.c
index e152a45d..5342de0 100644
--- a/drivers/media/platform/msm/camera/cam_sync/cam_sync.c
+++ b/drivers/media/platform/msm/camera/cam_sync/cam_sync.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/init.h>
@@ -426,6 +426,7 @@ static int cam_sync_handle_create(struct cam_private_ioctl_arg *k_ioctl)
 
 static int cam_sync_handle_signal(struct cam_private_ioctl_arg *k_ioctl)
 {
+	int rc = 0;
 	struct cam_sync_signal sync_signal;
 
 	if (k_ioctl->size != sizeof(struct cam_sync_signal))
@@ -440,7 +441,14 @@ static int cam_sync_handle_signal(struct cam_private_ioctl_arg *k_ioctl)
 		return -EFAULT;
 
 	/* need to get ref for UMD signaled fences */
-	cam_sync_get_obj_ref(sync_signal.sync_obj);
+	rc = cam_sync_get_obj_ref(sync_signal.sync_obj);
+	if (rc) {
+		CAM_ERR(CAM_SYNC,
+			"Error: cannot signal an uninitialized sync obj = %d",
+			sync_signal.sync_obj);
+		return rc;
+	}
+
 	return cam_sync_signal(sync_signal.sync_obj,
 		sync_signal.sync_state);
 }
diff --git a/drivers/media/platform/msm/vidc/venus_hfi.c b/drivers/media/platform/msm/vidc/venus_hfi.c
index aac820b..faa9e82 100644
--- a/drivers/media/platform/msm/vidc/venus_hfi.c
+++ b/drivers/media/platform/msm/vidc/venus_hfi.c
@@ -3209,6 +3209,7 @@ static int __power_collapse(struct venus_hfi_device *device, bool force)
 {
 	int rc = 0;
 	u32 wfi_status = 0, idle_status = 0, pc_ready = 0;
+	u32 ctrl_status = 0;
 	u32 flags = 0;
 	int count = 0;
 	const int max_tries = 10;
@@ -3223,10 +3224,8 @@ static int __power_collapse(struct venus_hfi_device *device, bool force)
 		goto exit;
 	}
 
-	rc = __core_in_valid_state(device);
-	if (!rc) {
-		dprintk(VIDC_WARN,
-				"Core is in bad state, Skipping power collapse\n");
+	if (!__core_in_valid_state(device)) {
+		dprintk(VIDC_WARN, "%s - Core not in init state\n", __func__);
 		return -EINVAL;
 	}
 
@@ -3236,39 +3235,34 @@ static int __power_collapse(struct venus_hfi_device *device, bool force)
 	else if (rc)
 		goto skip_power_off;
 
-	pc_ready = __read_register(device, VIDC_CTRL_STATUS) &
-		VIDC_CTRL_STATUS_PC_READY;
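+	/* Read CTRL_STATUS once; BIT(30) carries the core idle indication */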
+	ctrl_status = __read_register(device, VIDC_CTRL_STATUS);
+	pc_ready = ctrl_status & VIDC_CTRL_STATUS_PC_READY;
+	idle_status = ctrl_status & BIT(30);
+
 	if (!pc_ready) {
-		wfi_status = __read_register(device,
-				VIDC_WRAPPER_TZ_CPU_STATUS);
-		idle_status = __read_register(device,
-				VIDC_CTRL_STATUS);
-		if (!(wfi_status & BIT(0))) {
+		wfi_status = BIT(0) &
+				__read_register(device,
+					VIDC_WRAPPER_TZ_CPU_STATUS);
+		if (!wfi_status) {
 			dprintk(VIDC_WARN,
-				"Skipping PC as wfi_status (%#x) bit not set\n",
-				wfi_status);
-			goto skip_power_off;
-		}
-		if (!(idle_status & BIT(30))) {
-			dprintk(VIDC_WARN,
-				"Skipping PC as idle_status (%#x) bit not set\n",
-				idle_status);
+				"Skipping PC, wfi_status not set.\n");
 			goto skip_power_off;
 		}
 
 		rc = __prepare_pc(device);
 		if (rc) {
-			dprintk(VIDC_WARN, "Failed PC %d\n", rc);
+			dprintk(VIDC_WARN, "Failed __prepare_pc %d\n", rc);
 			goto skip_power_off;
 		}
 
 		while (count < max_tries) {
-			wfi_status = __read_register(device,
+			wfi_status = BIT(0) &
+					__read_register(device,
 					VIDC_WRAPPER_TZ_CPU_STATUS);
-			pc_ready = __read_register(device,
+			ctrl_status = __read_register(device,
 					VIDC_CTRL_STATUS);
-			if ((wfi_status & BIT(0)) && (pc_ready &
-				VIDC_CTRL_STATUS_PC_READY))
+			if (wfi_status &&
+				(ctrl_status & VIDC_CTRL_STATUS_PC_READY))
 				break;
 			usleep_range(150, 250);
 			count++;
@@ -3276,8 +3270,7 @@ static int __power_collapse(struct venus_hfi_device *device, bool force)
 
 		if (count == max_tries) {
 			dprintk(VIDC_ERR,
-					"Skip PC. Core is not in right state (%#x, %#x)\n",
-					wfi_status, pc_ready);
+				"Skip PC. Core is not in right state.\n");
 			goto skip_power_off;
 		}
 	}
@@ -3292,8 +3285,8 @@ static int __power_collapse(struct venus_hfi_device *device, bool force)
 	return rc;
 
 skip_power_off:
-	dprintk(VIDC_WARN, "Skip PC(%#x, %#x, %#x)\n",
-		wfi_status, idle_status, pc_ready);
+	dprintk(VIDC_WARN, "Skip PC, wfi=%#x, idle=%#x, pcr=%#x, ctrl=%#x\n",
+		wfi_status, idle_status, pc_ready, ctrl_status);
 
 	return -EAGAIN;
 }
diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
index 6481212..988ec01 100644
--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
@@ -493,7 +493,11 @@ static int get_v4l2_plane32(struct v4l2_plane __user *p64,
 
 	if (copy_in_user(p64, p32, 2 * sizeof(__u32)) ||
 	    copy_in_user(&p64->data_offset, &p32->data_offset,
-			 sizeof(p64->data_offset)))
+			 sizeof(p64->data_offset)) ||
+	    copy_in_user(p64->reserved, p32->reserved,
+			 sizeof(p64->reserved)) ||
+	    copy_in_user(&p64->length, &p32->length,
+			 sizeof(p64->length)))
 		return -EFAULT;
 
 	switch (memory) {
@@ -525,7 +529,9 @@ static int put_v4l2_plane32(struct v4l2_plane __user *p64,
 
 	if (copy_in_user(p32, p64, 2 * sizeof(__u32)) ||
 	    copy_in_user(&p32->data_offset, &p64->data_offset,
-			 sizeof(p64->data_offset)))
+			 sizeof(p64->data_offset)) ||
+	    copy_in_user(p32->reserved, p64->reserved,
+			 sizeof(p64->reserved)))
 		return -EFAULT;
 
 	switch (memory) {
diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c
index 55af04f..6c8dcb6 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nic_main.c
@@ -1441,6 +1441,9 @@ static void nic_remove(struct pci_dev *pdev)
 {
 	struct nicpf *nic = pci_get_drvdata(pdev);
 
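+	/* drvdata can be NULL here if nic_probe failed */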
+	if (!nic)
+		return;
+
 	if (nic->flags & NIC_SRIOV_ENABLED)
 		pci_disable_sriov(pdev);
 
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
index 14374a8..6127697 100644
--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
+++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
@@ -914,10 +914,8 @@ static int hip04_mac_probe(struct platform_device *pdev)
 	}
 
 	ret = register_netdev(ndev);
-	if (ret) {
-		free_netdev(ndev);
+	if (ret)
 		goto alloc_fail;
-	}
 
 	return 0;
 
diff --git a/drivers/net/ethernet/ibm/emac/emac.h b/drivers/net/ethernet/ibm/emac/emac.h
index e2f80cc..0d2de6f 100644
--- a/drivers/net/ethernet/ibm/emac/emac.h
+++ b/drivers/net/ethernet/ibm/emac/emac.h
@@ -231,7 +231,7 @@ struct emac_regs {
 #define EMAC_STACR_PHYE			0x00004000
 #define EMAC_STACR_STAC_MASK		0x00003000
 #define EMAC_STACR_STAC_READ		0x00001000
-#define EMAC_STACR_STAC_WRITE		0x00000800
+#define EMAC_STACR_STAC_WRITE		0x00002000
 #define EMAC_STACR_OPBC_MASK		0x00000C00
 #define EMAC_STACR_OPBC_50		0x00000000
 #define EMAC_STACR_OPBC_66		0x00000400
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c
index c54ebed..c393cb2 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.c
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.c
@@ -842,6 +842,7 @@ s32 igb_pll_workaround_i210(struct e1000_hw *hw)
 		nvm_word = E1000_INVM_DEFAULT_AL;
 	tmp_nvm = nvm_word | E1000_INVM_PLL_WO_VAL;
 	igb_write_phy_reg_82580(hw, I347AT4_PAGE_SELECT, E1000_PHY_PLL_FREQ_PAGE);
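+	/* Start from the "unconfigured" value in case the PHY reads below
+	 * fail and leave phy_word untouched.
+	 */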
+	phy_word = E1000_PHY_PLL_UNCONF;
 	for (i = 0; i < E1000_MAX_PLL_TRIES; i++) {
 		/* check current state directly from internal PHY */
 		igb_read_phy_reg_82580(hw, E1000_PHY_PLL_FREQ_REG, &phy_word);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
index a8148c7..9772016 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -2248,7 +2248,9 @@ static s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
 		*autoneg = false;
 
 		if (hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
-		    hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) {
+		    hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1 ||
+		    hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
+		    hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1) {
 			*speed = IXGBE_LINK_SPEED_1GB_FULL;
 			return 0;
 		}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index f11b450..d290f07 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -1084,8 +1084,8 @@ static int mlx4_en_set_pauseparam(struct net_device *dev,
 
 	tx_pause = !!(pause->tx_pause);
 	rx_pause = !!(pause->rx_pause);
-	rx_ppp = priv->prof->rx_ppp && !(tx_pause || rx_pause);
-	tx_ppp = priv->prof->tx_ppp && !(tx_pause || rx_pause);
+	rx_ppp = (tx_pause || rx_pause) ? 0 : priv->prof->rx_ppp;
+	tx_ppp = (tx_pause || rx_pause) ? 0 : priv->prof->tx_ppp;
 
 	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
 				    priv->rx_skb_size + ETH_FCS_LEN,
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index fe49384..0d7fd3f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -3494,8 +3494,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 		dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
 	}
 
-	/* MTU range: 46 - hw-specific max */
-	dev->min_mtu = MLX4_EN_MIN_MTU;
+	/* MTU range: 68 - hw-specific max */
+	dev->min_mtu = ETH_MIN_MTU;
 	dev->max_mtu = priv->max_mtu;
 
 	mdev->pndev[port] = dev;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index c3228b8..240f9c9 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -161,7 +161,6 @@
 #define MLX4_SELFTEST_LB_MIN_MTU (MLX4_LOOPBACK_TEST_PAYLOAD + NET_IP_ALIGN + \
 				  ETH_HLEN + PREAMBLE_LEN)
 
-#define MLX4_EN_MIN_MTU		46
 /* VLAN_HLEN is added twice,to support skb vlan tagged with multiple
  * headers. (For example: ETH_P_8021Q and ETH_P_8021AD).
  */
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
index 263e277..3da56f4 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
  *
  * RMNET Data ingress/egress handler
  *
@@ -45,13 +45,15 @@ EXPORT_TRACEPOINT_SYMBOL(rmnet_err);
 
 static int rmnet_check_skb_can_gro(struct sk_buff *skb)
 {
+	unsigned char *data = rmnet_map_data_ptr(skb);
+
 	switch (skb->protocol) {
 	case htons(ETH_P_IP):
-		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
+		if (((struct iphdr *)data)->protocol == IPPROTO_TCP)
 			return 0;
 		break;
 	case htons(ETH_P_IPV6):
-		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
+		if (((struct ipv6hdr *)data)->nexthdr == IPPROTO_TCP)
 			return 0;
 		/* Fall through */
 	}
@@ -61,7 +63,7 @@ static int rmnet_check_skb_can_gro(struct sk_buff *skb)
 
 void rmnet_set_skb_proto(struct sk_buff *skb)
 {
-	switch (skb->data[0] & 0xF0) {
+	switch (rmnet_map_data_ptr(skb)[0] & 0xF0) {
 	case RMNET_IP_VERSION_4:
 		skb->protocol = htons(ETH_P_IP);
 		break;
@@ -129,11 +131,13 @@ static void
 __rmnet_map_ingress_handler(struct sk_buff *skb,
 			    struct rmnet_port *port)
 {
+	struct rmnet_map_header *qmap;
 	struct rmnet_endpoint *ep;
 	u16 len, pad;
 	u8 mux_id;
 
-	if (RMNET_MAP_GET_CD_BIT(skb)) {
+	qmap = (struct rmnet_map_header *)rmnet_map_data_ptr(skb);
+	if (qmap->cd_bit) {
 		if (port->data_format & RMNET_INGRESS_FORMAT_DL_MARKER) {
 			if (!rmnet_map_flow_command(skb, port, false))
 				return;
@@ -145,9 +149,9 @@ __rmnet_map_ingress_handler(struct sk_buff *skb,
 		goto free_skb;
 	}
 
-	mux_id = RMNET_MAP_GET_MUX_ID(skb);
-	pad = RMNET_MAP_GET_PAD(skb);
-	len = RMNET_MAP_GET_LENGTH(skb) - pad;
+	mux_id = qmap->mux_id;
+	pad = qmap->pad_len;
+	len = ntohs(qmap->pkt_len) - pad;
 
 	if (mux_id >= RMNET_MAX_LOGICAL_EP)
 		goto free_skb;
@@ -159,7 +163,7 @@ __rmnet_map_ingress_handler(struct sk_buff *skb,
 	skb->dev = ep->egress_dev;
 
 	/* Subtract MAP header */
-	skb_pull(skb, sizeof(struct rmnet_map_header));
+	pskb_pull(skb, sizeof(struct rmnet_map_header));
 	rmnet_set_skb_proto(skb);
 
 	if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4) {
@@ -172,7 +176,7 @@ __rmnet_map_ingress_handler(struct sk_buff *skb,
 		qmi_rmnet_work_maybe_restart(port);
 #endif
 
-	skb_trim(skb, len);
+	pskb_trim(skb, len);
 	rmnet_deliver_skb(skb, port);
 	return;
 
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
index 645c3d5..ca7600c 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved. */
+/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved. */
 
 #ifndef _RMNET_MAP_H_
 #define _RMNET_MAP_H_
@@ -134,6 +134,24 @@ struct rmnet_map_dl_ind {
 #define RMNET_MAP_NO_PAD_BYTES        0
 #define RMNET_MAP_ADD_PAD_BYTES       1
 
+static inline unsigned char *rmnet_map_data_ptr(struct sk_buff *skb)
+{
+	/* Nonlinear packets we receive are entirely within frag 0 */
+	if (skb_is_nonlinear(skb) && skb->len == skb->data_len)
+		return skb_frag_address(skb_shinfo(skb)->frags);
+
+	return skb->data;
+}
+
+static inline struct rmnet_map_control_command *
+rmnet_map_get_cmd_start(struct sk_buff *skb)
+{
+	unsigned char *data = rmnet_map_data_ptr(skb);
+
+	data += sizeof(struct rmnet_map_header);
+	return (struct rmnet_map_control_command *)data;
+}
+
 struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
 				      struct rmnet_port *port);
 struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c
index e9b0ddc..cb8bdf5 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved. */
+/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved. */
 
 #include <linux/netdevice.h>
 #include "rmnet_config.h"
@@ -22,6 +22,7 @@ static u8 rmnet_map_do_flow_control(struct sk_buff *skb,
 				    struct rmnet_port *port,
 				    int enable)
 {
+	struct rmnet_map_header *qmap;
 	struct rmnet_map_control_command *cmd;
 	struct rmnet_endpoint *ep;
 	struct net_device *vnd;
@@ -31,8 +32,9 @@ static u8 rmnet_map_do_flow_control(struct sk_buff *skb,
 	u8 mux_id;
 	int r;
 
-	mux_id = RMNET_MAP_GET_MUX_ID(skb);
-	cmd = RMNET_MAP_GET_CMD_START(skb);
+	qmap = (struct rmnet_map_header *)rmnet_map_data_ptr(skb);
+	mux_id = qmap->mux_id;
+	cmd = rmnet_map_get_cmd_start(skb);
 
 	if (mux_id >= RMNET_MAX_LOGICAL_EP) {
 		kfree_skb(skb);
@@ -72,12 +74,12 @@ static void rmnet_map_send_ack(struct sk_buff *skb,
 	struct net_device *dev = skb->dev;
 
 	if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4)
-		skb_trim(skb,
-			 skb->len - sizeof(struct rmnet_map_dl_csum_trailer));
+		pskb_trim(skb,
+			  skb->len - sizeof(struct rmnet_map_dl_csum_trailer));
 
 	skb->protocol = htons(ETH_P_MAP);
 
-	cmd = RMNET_MAP_GET_CMD_START(skb);
+	cmd = rmnet_map_get_cmd_start(skb);
 	cmd->cmd_type = type & 0x03;
 
 	netif_tx_lock(dev);
@@ -122,9 +124,9 @@ static void rmnet_map_process_flow_start(struct sk_buff *skb,
 	if (skb->len < RMNET_DL_IND_HDR_SIZE)
 		return;
 
-	skb_pull(skb, RMNET_MAP_CMD_SIZE);
+	pskb_pull(skb, RMNET_MAP_CMD_SIZE);
 
-	dlhdr = (struct rmnet_map_dl_ind_hdr *)skb->data;
+	dlhdr = (struct rmnet_map_dl_ind_hdr *)rmnet_map_data_ptr(skb);
 
 	port->stats.dl_hdr_last_seq = dlhdr->le.seq;
 	port->stats.dl_hdr_last_bytes = dlhdr->le.bytes;
@@ -150,7 +152,7 @@ static void rmnet_map_process_flow_start(struct sk_buff *skb,
 		pull_size = sizeof(struct rmnet_map_dl_ind_hdr);
 		if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4)
 			pull_size += sizeof(struct rmnet_map_dl_csum_trailer);
-		skb_pull(skb, pull_size);
+		pskb_pull(skb, pull_size);
 	}
 }
 
@@ -163,9 +165,9 @@ static void rmnet_map_process_flow_end(struct sk_buff *skb,
 	if (skb->len < RMNET_DL_IND_TRL_SIZE)
 		return;
 
-	skb_pull(skb, RMNET_MAP_CMD_SIZE);
+	pskb_pull(skb, RMNET_MAP_CMD_SIZE);
 
-	dltrl = (struct rmnet_map_dl_ind_trl *)skb->data;
+	dltrl = (struct rmnet_map_dl_ind_trl *)rmnet_map_data_ptr(skb);
 
 	port->stats.dl_trl_last_seq = dltrl->seq_le;
 	port->stats.dl_trl_count++;
@@ -177,7 +179,7 @@ static void rmnet_map_process_flow_end(struct sk_buff *skb,
 		pull_size = sizeof(struct rmnet_map_dl_ind_trl);
 		if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4)
 			pull_size += sizeof(struct rmnet_map_dl_csum_trailer);
-		skb_pull(skb, pull_size);
+		pskb_pull(skb, pull_size);
 	}
 }
 
@@ -190,7 +192,7 @@ void rmnet_map_command(struct sk_buff *skb, struct rmnet_port *port)
 	unsigned char command_name;
 	unsigned char rc = 0;
 
-	cmd = RMNET_MAP_GET_CMD_START(skb);
+	cmd = rmnet_map_get_cmd_start(skb);
 	command_name = cmd->command_name;
 
 	switch (command_name) {
@@ -217,7 +219,7 @@ int rmnet_map_flow_command(struct sk_buff *skb, struct rmnet_port *port,
 	struct rmnet_map_control_command *cmd;
 	unsigned char command_name;
 
-	cmd = RMNET_MAP_GET_CMD_START(skb);
+	cmd = rmnet_map_get_cmd_start(skb);
 	command_name = cmd->command_name;
 
 	switch (command_name) {
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
index 6b2def2..426aeca 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
  *
  * RMNET Data MAP protocol
  *
@@ -51,14 +51,14 @@ rmnet_map_ipv4_dl_csum_trailer(struct sk_buff *skb,
 	void *txporthdr;
 	__be16 addend;
 
-	ip4h = (struct iphdr *)(skb->data);
+	ip4h = (struct iphdr *)rmnet_map_data_ptr(skb);
 	if ((ntohs(ip4h->frag_off) & IP_MF) ||
 	    ((ntohs(ip4h->frag_off) & IP_OFFSET) > 0)) {
 		priv->stats.csum_fragmented_pkt++;
 		return -EOPNOTSUPP;
 	}
 
-	txporthdr = skb->data + ip4h->ihl * 4;
+	txporthdr = rmnet_map_data_ptr(skb) + ip4h->ihl * 4;
 
 	csum_field = rmnet_map_get_csum_field(ip4h->protocol, txporthdr);
 
@@ -122,12 +122,12 @@ rmnet_map_ipv6_dl_csum_trailer(struct sk_buff *skb,
 	u16 csum_value, csum_value_final;
 	__be16 ip6_hdr_csum, addend;
 	struct ipv6hdr *ip6h;
-	void *txporthdr;
+	void *txporthdr, *data = rmnet_map_data_ptr(skb);
 	u32 length;
 
-	ip6h = (struct ipv6hdr *)(skb->data);
+	ip6h = data;
 
-	txporthdr = skb->data + sizeof(struct ipv6hdr);
+	txporthdr = data + sizeof(struct ipv6hdr);
 	csum_field = rmnet_map_get_csum_field(ip6h->nexthdr, txporthdr);
 
 	if (!csum_field) {
@@ -138,7 +138,7 @@ rmnet_map_ipv6_dl_csum_trailer(struct sk_buff *skb,
 	csum_value = ~ntohs(csum_trailer->csum_value);
 	ip6_hdr_csum = (__force __be16)
 			~ntohs((__force __be16)ip_compute_csum(ip6h,
-			       (int)(txporthdr - (void *)(skb->data))));
+			       (int)(txporthdr - data)));
 	ip6_payload_csum = csum16_sub((__force __sum16)csum_value,
 				      ip6_hdr_csum);
 
@@ -311,12 +311,13 @@ struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
 {
 	struct rmnet_map_header *maph;
 	struct sk_buff *skbn;
+	unsigned char *data = rmnet_map_data_ptr(skb);
 	u32 packet_len;
 
 	if (skb->len == 0)
 		return NULL;
 
-	maph = (struct rmnet_map_header *)skb->data;
+	maph = (struct rmnet_map_header *)data;
 	packet_len = ntohs(maph->pkt_len) + sizeof(struct rmnet_map_header);
 
 	if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4)
@@ -329,14 +330,30 @@ struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
 	if (ntohs(maph->pkt_len) == 0)
 		return NULL;
 
-	skbn = alloc_skb(packet_len + RMNET_MAP_DEAGGR_SPACING, GFP_ATOMIC);
-	if (!skbn)
-		return NULL;
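+	/* Nonlinear packets keep the MAP payload in frag 0; attach the page
+	 * to the new skb instead of copying packet_len bytes.
+	 */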
+	if (skb_is_nonlinear(skb)) {
+		skb_frag_t *frag0 = skb_shinfo(skb)->frags;
+		struct page *page = skb_frag_page(frag0);
 
-	skb_reserve(skbn, RMNET_MAP_DEAGGR_HEADROOM);
-	skb_put(skbn, packet_len);
-	memcpy(skbn->data, skb->data, packet_len);
-	skb_pull(skb, packet_len);
+		skbn = alloc_skb(RMNET_MAP_DEAGGR_HEADROOM, GFP_ATOMIC);
+		if (!skbn)
+			return NULL;
+
+		skb_append_pagefrags(skbn, page, frag0->page_offset,
+				     packet_len);
+		skbn->data_len += packet_len;
+		skbn->len += packet_len;
+	} else {
+		skbn = alloc_skb(packet_len + RMNET_MAP_DEAGGR_SPACING,
+				 GFP_ATOMIC);
+		if (!skbn)
+			return NULL;
+
+		skb_reserve(skbn, RMNET_MAP_DEAGGR_HEADROOM);
+		skb_put(skbn, packet_len);
+		memcpy(skbn->data, data, packet_len);
+	}
+
+	pskb_pull(skb, packet_len);
 
 	return skbn;
 }
@@ -357,7 +374,8 @@ int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len)
 		return -EOPNOTSUPP;
 	}
 
-	csum_trailer = (struct rmnet_map_dl_csum_trailer *)(skb->data + len);
+	csum_trailer = (struct rmnet_map_dl_csum_trailer *)
+		       (rmnet_map_data_ptr(skb) + len);
 
 	if (!csum_trailer->valid) {
 		priv->stats.csum_valid_unset++;
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index 81045df..44f6e48 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -571,6 +571,7 @@ static irqreturn_t cp_interrupt (int irq, void *dev_instance)
 	struct cp_private *cp;
 	int handled = 0;
 	u16 status;
+	u16 mask;
 
 	if (unlikely(dev == NULL))
 		return IRQ_NONE;
@@ -578,6 +579,10 @@ static irqreturn_t cp_interrupt (int irq, void *dev_instance)
 
 	spin_lock(&cp->lock);
 
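+	/* If all IRQ sources are masked, e.g. while the device is being
+	 * reconfigured, the interrupt cannot be ours.
+	 */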
+	mask = cpr16(IntrMask);
+	if (!mask)
+		goto out_unlock;
+
 	status = cpr16(IntrStatus);
 	if (!status || (status == 0xFFFF))
 		goto out_unlock;
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 733e35b..20d1be2 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -1738,20 +1738,17 @@ EXPORT_SYMBOL(genphy_loopback);
 
 static int __set_phy_supported(struct phy_device *phydev, u32 max_speed)
 {
-	phydev->supported &= ~(PHY_1000BT_FEATURES | PHY_100BT_FEATURES |
-			       PHY_10BT_FEATURES);
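+	/* Only strip the modes above max_speed; blanket-clearing and
+	 * re-adding feature bits could advertise modes this PHY never
+	 * supported.
+	 */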
-
 	switch (max_speed) {
-	default:
-		return -ENOTSUPP;
-	case SPEED_1000:
-		phydev->supported |= PHY_1000BT_FEATURES;
+	case SPEED_10:
+		phydev->supported &= ~PHY_100BT_FEATURES;
 		/* fall through */
 	case SPEED_100:
-		phydev->supported |= PHY_100BT_FEATURES;
-		/* fall through */
-	case SPEED_10:
-		phydev->supported |= PHY_10BT_FEATURES;
+		phydev->supported &= ~PHY_1000BT_FEATURES;
+		break;
+	case SPEED_1000:
+		break;
+	default:
+		return -ENOTSUPP;
 	}
 
 	return 0;
diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c
index 83060fb..ad9db65 100644
--- a/drivers/net/phy/sfp-bus.c
+++ b/drivers/net/phy/sfp-bus.c
@@ -162,7 +162,7 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
 	/* 1000Base-PX or 1000Base-BX10 */
 	if ((id->base.e_base_px || id->base.e_base_bx10) &&
 	    br_min <= 1300 && br_max >= 1200)
-		phylink_set(support, 1000baseX_Full);
+		phylink_set(modes, 1000baseX_Full);
 
 	/* For active or passive cables, select the link modes
 	 * based on the bit rates and the cable compliance bytes.
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 9728862..1cd8728 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -2272,9 +2272,9 @@ static void tun_setup(struct net_device *dev)
 static int tun_validate(struct nlattr *tb[], struct nlattr *data[],
 			struct netlink_ext_ack *extack)
 {
-	if (!data)
-		return 0;
-	return -EINVAL;
+	NL_SET_ERR_MSG(extack,
+		       "tun/tap creation via rtnetlink is not supported.");
+	return -EOPNOTSUPP;
 }
 
 static size_t tun_get_size(const struct net_device *dev)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index c2ca6cd..ad14fbf 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -365,7 +365,8 @@ static unsigned int mergeable_ctx_to_truesize(void *mrg_ctx)
 static struct sk_buff *page_to_skb(struct virtnet_info *vi,
 				   struct receive_queue *rq,
 				   struct page *page, unsigned int offset,
-				   unsigned int len, unsigned int truesize)
+				   unsigned int len, unsigned int truesize,
+				   bool hdr_valid)
 {
 	struct sk_buff *skb;
 	struct virtio_net_hdr_mrg_rxbuf *hdr;
@@ -387,7 +388,8 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
 	else
 		hdr_padded_len = sizeof(struct padded_vnet_hdr);
 
-	memcpy(hdr, p, hdr_len);
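+	/* hdr_valid is false for XDP buffers, whose virtio-net header may
+	 * have been overwritten by the XDP program.
+	 */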
+	if (hdr_valid)
+		memcpy(hdr, p, hdr_len);
 
 	len -= hdr_len;
 	offset += hdr_padded_len;
@@ -739,7 +741,8 @@ static struct sk_buff *receive_big(struct net_device *dev,
 				   struct virtnet_rq_stats *stats)
 {
 	struct page *page = buf;
-	struct sk_buff *skb = page_to_skb(vi, rq, page, 0, len, PAGE_SIZE);
+	struct sk_buff *skb = page_to_skb(vi, rq, page, 0, len,
+					  PAGE_SIZE, true);
 
 	stats->bytes += len - vi->hdr_len;
 	if (unlikely(!skb))
@@ -842,7 +845,8 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 				rcu_read_unlock();
 				put_page(page);
 				head_skb = page_to_skb(vi, rq, xdp_page,
-						       offset, len, PAGE_SIZE);
+						       offset, len,
+						       PAGE_SIZE, false);
 				return head_skb;
 			}
 			break;
@@ -898,7 +902,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 		goto err_skb;
 	}
 
-	head_skb = page_to_skb(vi, rq, page, offset, len, truesize);
+	head_skb = page_to_skb(vi, rq, page, offset, len, truesize, !xdp_prog);
 	curr_skb = head_skb;
 
 	if (unlikely(!curr_skb))
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 0ba301f..b7b2659 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -3308,6 +3308,9 @@ void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
 	struct nvme_ns *ns, *next;
 	LIST_HEAD(ns_list);
 
+	/* prevent racing with ns scanning */
+	flush_work(&ctrl->scan_work);
+
 	/*
 	 * The dead states indicates the controller was not gracefully
 	 * disconnected. In that case, we won't be able to flush any data while
@@ -3463,7 +3466,6 @@ void nvme_stop_ctrl(struct nvme_ctrl *ctrl)
 	nvme_mpath_stop(ctrl);
 	nvme_stop_keep_alive(ctrl);
 	flush_work(&ctrl->async_event_work);
-	flush_work(&ctrl->scan_work);
 	cancel_work_sync(&ctrl->fw_act_work);
 	if (ctrl->ops->stop_ctrl)
 		ctrl->ops->stop_ctrl(ctrl);
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index bb4a200..60220de 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -537,6 +537,9 @@ static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
 static inline int nvme_mpath_init(struct nvme_ctrl *ctrl,
 		struct nvme_id_ctrl *id)
 {
+	if (ctrl->subsys->cmic & (1 << 3))
+		dev_warn(ctrl->device,
+"Please enable CONFIG_NVME_MULTIPATH for full support of multi-port devices.\n");
 	return 0;
 }
 static inline void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index dc04201..b6a28de 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -184,6 +184,7 @@ static int nvme_rdma_alloc_qe(struct ib_device *ibdev, struct nvme_rdma_qe *qe,
 	qe->dma = ib_dma_map_single(ibdev, qe->data, capsule_size, dir);
 	if (ib_dma_mapping_error(ibdev, qe->dma)) {
 		kfree(qe->data);
+		qe->data = NULL;
 		return -ENOMEM;
 	}
 
@@ -816,6 +817,7 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
 out_free_async_qe:
 	nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
 		sizeof(struct nvme_command), DMA_TO_DEVICE);
+	ctrl->async_event_sqe.data = NULL;
 out_free_queue:
 	nvme_rdma_free_queue(&ctrl->queues[0]);
 	return error;
diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
index 4a9a673..975050a 100644
--- a/drivers/pci/controller/dwc/pci-imx6.c
+++ b/drivers/pci/controller/dwc/pci-imx6.c
@@ -80,8 +80,6 @@ struct imx6_pcie {
 #define PCIE_PL_PFLR_FORCE_LINK			(1 << 15)
 #define PCIE_PHY_DEBUG_R0 (PL_OFFSET + 0x28)
 #define PCIE_PHY_DEBUG_R1 (PL_OFFSET + 0x2c)
-#define PCIE_PHY_DEBUG_R1_XMLH_LINK_IN_TRAINING	(1 << 29)
-#define PCIE_PHY_DEBUG_R1_XMLH_LINK_UP		(1 << 4)
 
 #define PCIE_PHY_CTRL (PL_OFFSET + 0x114)
 #define PCIE_PHY_CTRL_DATA_LOC 0
@@ -641,12 +639,6 @@ static int imx6_pcie_host_init(struct pcie_port *pp)
 	return 0;
 }
 
-static int imx6_pcie_link_up(struct dw_pcie *pci)
-{
-	return dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R1) &
-			PCIE_PHY_DEBUG_R1_XMLH_LINK_UP;
-}
-
 static const struct dw_pcie_host_ops imx6_pcie_host_ops = {
 	.host_init = imx6_pcie_host_init,
 };
@@ -679,7 +671,7 @@ static int imx6_add_pcie_port(struct imx6_pcie *imx6_pcie,
 }
 
 static const struct dw_pcie_ops dw_pcie_ops = {
-	.link_up = imx6_pcie_link_up,
+	/* No special ops needed, but pcie-designware still expects this struct */
 };
 
 static int imx6_pcie_probe(struct platform_device *pdev)
diff --git a/drivers/phy/qualcomm/phy-qcom-qusb2.c b/drivers/phy/qualcomm/phy-qcom-qusb2.c
index e70e425..69c9284 100644
--- a/drivers/phy/qualcomm/phy-qcom-qusb2.c
+++ b/drivers/phy/qualcomm/phy-qcom-qusb2.c
@@ -231,6 +231,7 @@ static const struct qusb2_phy_cfg sdm845_phy_cfg = {
 	.mask_core_ready = CORE_READY_STATUS,
 	.has_pll_override = true,
 	.autoresume_en	  = BIT(0),
+	.update_tune1_with_efuse = true,
 };
 
 static const char * const qusb2_phy_vreg_names[] = {
@@ -402,10 +403,10 @@ static void qusb2_phy_set_tune2_param(struct qusb2_phy *qphy)
 
 	/*
 	 * Read efuse register having TUNE2/1 parameter's high nibble.
-	 * If efuse register shows value as 0x0, or if we fail to find
-	 * a valid efuse register settings, then use default value
-	 * as 0xB for high nibble that we have already set while
-	 * configuring phy.
+	 * If efuse register shows value as 0x0 (indicating value is not
+	 * fused), or if we fail to find a valid efuse register setting,
+	 * then use default value for high nibble that we have already
+	 * set while configuring the phy.
 	 */
 	val = nvmem_cell_read(qphy->cell, NULL);
 	if (IS_ERR(val) || !val[0]) {
@@ -415,12 +416,13 @@ static void qusb2_phy_set_tune2_param(struct qusb2_phy *qphy)
 
 	/* Fused TUNE1/2 value is the higher nibble only */
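+	/* Write the HSTX_TRIM field with a mask so the fused value fully
+	 * replaces the default nibble instead of being OR-ed into it.
+	 */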
 	if (cfg->update_tune1_with_efuse)
-		qusb2_setbits(qphy->base, cfg->regs[QUSB2PHY_PORT_TUNE1],
-			      val[0] << 0x4);
+		qusb2_write_mask(qphy->base, cfg->regs[QUSB2PHY_PORT_TUNE1],
+				 val[0] << HSTX_TRIM_SHIFT,
+				 HSTX_TRIM_MASK);
 	else
-		qusb2_setbits(qphy->base, cfg->regs[QUSB2PHY_PORT_TUNE2],
-			      val[0] << 0x4);
-
+		qusb2_write_mask(qphy->base, cfg->regs[QUSB2PHY_PORT_TUNE2],
+				 val[0] << HSTX_TRIM_SHIFT,
+				 HSTX_TRIM_MASK);
 }
 
 static int qusb2_phy_set_mode(struct phy *phy, enum phy_mode mode)
diff --git a/drivers/platform/msm/gsi/gsi.c b/drivers/platform/msm/gsi/gsi.c
index 8921c2b..6d1f1ba 100644
--- a/drivers/platform/msm/gsi/gsi.c
+++ b/drivers/platform/msm/gsi/gsi.c
@@ -2313,17 +2313,20 @@ int gsi_alloc_channel(struct gsi_chan_props *props, unsigned long dev_hdl,
 	}
 	erindex = props->evt_ring_hdl != ~0 ? props->evt_ring_hdl :
 		GSI_NO_EVT_ERINDEX;
-	if (erindex == GSI_NO_EVT_ERINDEX || erindex >= GSI_EVT_RING_MAX) {
+	if (erindex != GSI_NO_EVT_ERINDEX && erindex >= GSI_EVT_RING_MAX) {
 		GSIERR("invalid erindex %u\n", erindex);
 		devm_kfree(gsi_ctx->dev, user_data);
 		return -GSI_STATUS_INVALID_PARAMS;
 	}
-	ctx->evtr = &gsi_ctx->evtr[erindex];
-	atomic_inc(&ctx->evtr->chan_ref_cnt);
-	if (props->prot != GSI_CHAN_PROT_GCI &&
-		ctx->evtr->props.exclusive &&
-		atomic_read(&ctx->evtr->chan_ref_cnt) == 1)
-		ctx->evtr->chan = ctx;
+
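+	/* erindex == GSI_NO_EVT_ERINDEX denotes a channel without an
+	 * event ring; only bind an event ring for valid indices.
+	 */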
+	if (erindex < GSI_EVT_RING_MAX) {
+		ctx->evtr = &gsi_ctx->evtr[erindex];
+		atomic_inc(&ctx->evtr->chan_ref_cnt);
+		if (props->prot != GSI_CHAN_PROT_GCI &&
+			ctx->evtr->props.exclusive &&
+			atomic_read(&ctx->evtr->chan_ref_cnt) == 1)
+			ctx->evtr->chan = ctx;
+	}
 
 	gsi_program_chan_ctx(props, gsi_ctx->per.ee, erindex);
 
diff --git a/drivers/platform/msm/ipa/ipa_api.c b/drivers/platform/msm/ipa/ipa_api.c
index 90198b5..427c177 100644
--- a/drivers/platform/msm/ipa/ipa_api.c
+++ b/drivers/platform/msm/ipa/ipa_api.c
@@ -191,7 +191,15 @@ const char *ipa_clients_strings[IPA_CLIENT_MAX] = {
 	__stringify(IPA_CLIENT_ODL_DPL_CONS),
 	__stringify(IPA_CLIENT_Q6_AUDIO_DMA_MHI_PROD),
 	__stringify(IPA_CLIENT_Q6_AUDIO_DMA_MHI_CONS),
-	__stringify(RESERVED_PROD_86),
+	__stringify(IPA_CLIENT_WIGIG_PROD),
+	__stringify(IPA_CLIENT_WIGIG1_CONS),
+	__stringify(RESERVED_PROD_88),
+	__stringify(IPA_CLIENT_WIGIG2_CONS),
+	__stringify(RESERVED_PROD_90),
+	__stringify(IPA_CLIENT_WIGIG3_CONS),
+	__stringify(RESERVED_PROD_92),
+	__stringify(IPA_CLIENT_WIGIG4_CONS),
+	__stringify(RESERVED_PROD_94),
 	__stringify(IPA_CLIENT_APPS_WAN_COAL_CONS),
 };
 
@@ -1922,7 +1930,7 @@ EXPORT_SYMBOL(ipa_uc_wdi_get_dbpa);
 
 /**
  * ipa_uc_reg_rdyCB() - To register uC
- * ready CB if uC not ready
+ * ready CB if uC not ready, wdi only.
  * @inout:	[in/out] input/output parameters
  * from/to client
  *
@@ -1941,6 +1949,31 @@ int ipa_uc_reg_rdyCB(
 EXPORT_SYMBOL(ipa_uc_reg_rdyCB);
 
 /**
+ * ipa_wigig_uc_init() - get uC db and register uC ready CB
+ * if uC not ready, wigig only.
+ * @inout:	[in/out] uc ready input/output parameters from/to client
+ * @int_notify:	[in] wigig misc interrupt handler function
+ * @uc_db_pa:	[out] uC doorbell physical address
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa_wigig_uc_init(
+	struct ipa_wdi_uc_ready_params *inout,
+	ipa_wigig_misc_int_cb int_notify,
+	phys_addr_t *uc_db_pa)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_wigig_uc_init, inout,
+		int_notify, uc_db_pa);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_wigig_uc_init);
+
+/**
  * ipa_uc_dereg_rdyCB() - To de-register uC ready CB
  *
  * Returns:	0 on success, negative on failure
@@ -3368,6 +3401,98 @@ int ipa_disable_wdi_pipes(int ipa_ep_idx_tx, int ipa_ep_idx_rx)
 }
 
 /**
+ * ipa_wigig_uc_msi_init() - smmu map/unmap MSI-related wigig HW registers
+ *	and init/deinit uC MSI config
+ */
+int ipa_wigig_uc_msi_init(bool init,
+	phys_addr_t periph_baddr_pa,
+	phys_addr_t pseudo_cause_pa,
+	phys_addr_t int_gen_tx_pa,
+	phys_addr_t int_gen_rx_pa,
+	phys_addr_t dma_ep_misc_pa)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_wigig_uc_msi_init, init,
+		periph_baddr_pa,
+		pseudo_cause_pa,
+		int_gen_tx_pa,
+		int_gen_rx_pa,
+		dma_ep_misc_pa);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_wigig_uc_msi_init);
+
+/**
+ * ipa_conn_wigig_rx_pipe_i() - connect wigig rx pipe
+ */
+int ipa_conn_wigig_rx_pipe_i(void *in, struct ipa_wigig_conn_out_params *out)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_conn_wigig_rx_pipe_i, in, out);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_conn_wigig_rx_pipe_i);
+
+/**
+ * ipa_conn_wigig_client_i() - connect a wigig client
+ */
+int ipa_conn_wigig_client_i(void *in, struct ipa_wigig_conn_out_params *out)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_conn_wigig_client_i, in, out);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_conn_wigig_client_i);
+
+/**
+ * ipa_disconn_wigig_pipe_i() - disconnect a wigig pipe
+ */
+int ipa_disconn_wigig_pipe_i(enum ipa_client_type client,
+	struct ipa_wigig_pipe_setup_info_smmu *pipe_smmu,
+	void *dbuff)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_disconn_wigig_pipe_i, client,
+		pipe_smmu, dbuff);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_disconn_wigig_pipe_i);
+
+/**
+ * ipa_enable_wigig_pipe() - enable a wigig pipe
+ */
+int ipa_enable_wigig_pipe_i(enum ipa_client_type client)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_enable_wigig_pipe_i, client);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_enable_wigig_pipe_i);
+
+/**
+ * ipa_disable_wigig_pipe_i() - disable a wigig pipe
+ */
+int ipa_disable_wigig_pipe_i(enum ipa_client_type client)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_disable_wigig_pipe_i, client);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_disable_wigig_pipe_i);
+
+/**
  * ipa_tz_unlock_reg() - Allow AP access to memory regions controlled by TZ
  */
 int ipa_tz_unlock_reg(struct ipa_tz_unlock_reg_info *reg_info, u16 num_regs)
diff --git a/drivers/platform/msm/ipa/ipa_api.h b/drivers/platform/msm/ipa/ipa_api.h
index 4da9ba14..e43a5d6 100644
--- a/drivers/platform/msm/ipa/ipa_api.h
+++ b/drivers/platform/msm/ipa/ipa_api.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 /*
- * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/ipa_mhi.h>
@@ -421,6 +421,32 @@ struct ipa_api_controller {
 	int (*ipa_is_vlan_mode)(enum ipa_vlan_ifaces iface, bool *res);
 
 	bool (*ipa_pm_is_used)(void);
+
+	int (*ipa_wigig_uc_init)(
+		struct ipa_wdi_uc_ready_params *inout,
+		ipa_wigig_misc_int_cb int_notify,
+		phys_addr_t *uc_db_pa);
+
+	int (*ipa_conn_wigig_rx_pipe_i)(void *in,
+		struct ipa_wigig_conn_out_params *out);
+
+	int (*ipa_conn_wigig_client_i)(void *in,
+		struct ipa_wigig_conn_out_params *out);
+
+	int (*ipa_disconn_wigig_pipe_i)(enum ipa_client_type client,
+		struct ipa_wigig_pipe_setup_info_smmu *pipe_smmu,
+		void *dbuff);
+
+	int (*ipa_wigig_uc_msi_init)(bool init,
+		phys_addr_t periph_baddr_pa,
+		phys_addr_t pseudo_cause_pa,
+		phys_addr_t int_gen_tx_pa,
+		phys_addr_t int_gen_rx_pa,
+		phys_addr_t dma_ep_misc_pa);
+
+	int (*ipa_enable_wigig_pipe_i)(enum ipa_client_type client);
+
+	int (*ipa_disable_wigig_pipe_i)(enum ipa_client_type client);
 };
 
 #ifdef CONFIG_IPA3
diff --git a/drivers/platform/msm/ipa/ipa_clients/Makefile b/drivers/platform/msm/ipa/ipa_clients/Makefile
index edfc29c..537d4688 100644
--- a/drivers/platform/msm/ipa/ipa_clients/Makefile
+++ b/drivers/platform/msm/ipa/ipa_clients/Makefile
@@ -1,6 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0-only
 
-obj-$(CONFIG_IPA3) += ipa_usb.o odu_bridge.o ipa_mhi_client.o ipa_uc_offload.o ipa_wdi3.o ipa_gsb.o
+obj-$(CONFIG_IPA3) += ipa_usb.o odu_bridge.o ipa_mhi_client.o ipa_uc_offload.o ipa_wdi3.o ipa_gsb.o ipa_wigig.o
 obj-$(CONFIG_IPA) += odu_bridge.o ipa_mhi_client.o ipa_uc_offload.o ipa_wdi3.o
 obj-$(CONFIG_ECM_IPA) += ecm_ipa.o
 obj-$(CONFIG_RNDIS_IPA) += rndis_ipa.o
diff --git a/drivers/platform/msm/ipa/ipa_clients/ipa_wigig.c b/drivers/platform/msm/ipa/ipa_clients/ipa_wigig.c
new file mode 100644
index 0000000..2d5bd8b
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_clients/ipa_wigig.c
@@ -0,0 +1,1326 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/ipa_wigig.h>
+#include <linux/string.h>
+#include "../ipa_common_i.h"
+#include "../ipa_v3/ipa_pm.h"
+
+#define OFFLOAD_DRV_NAME "ipa_wigig"
+#define IPA_WIGIG_DBG(fmt, args...) \
+	do { \
+		pr_debug(OFFLOAD_DRV_NAME " %s:%d " fmt, \
+			__func__, __LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+			OFFLOAD_DRV_NAME " %s:%d " fmt, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			OFFLOAD_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+#define IPA_WIGIG_DBG_LOW(fmt, args...) \
+	do { \
+		pr_debug(OFFLOAD_DRV_NAME " %s:%d " fmt, \
+			__func__, __LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			OFFLOAD_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+#define IPA_WIGIG_ERR(fmt, args...) \
+	do { \
+		pr_err(OFFLOAD_DRV_NAME " %s:%d " fmt, \
+			__func__, __LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+			OFFLOAD_DRV_NAME " %s:%d " fmt, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			OFFLOAD_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
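The three logging macros above wrap their bodies in do { ... } while (0) so
that a multi-statement macro expands as one C statement. A minimal sketch of
the hazard this avoids (illustrative only; err and recover() are hypothetical):

	if (err)
		IPA_WIGIG_ERR("fail %d\n", err); /* expands as one statement */
	else
		recover(); /* without the wrapper, this else would not parse */
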
+#define IPA_WIGIG_RX_PIPE_IDX	0
+#define IPA_WIGIG_TX_PIPE_NUM	4
+#define IPA_WIGIG_MAX_PIPES	5
+
+struct ipa_wigig_intf_info {
+	char netdev_name[IPA_RESOURCE_NAME_MAX];
+	u8 netdev_mac[IPA_MAC_ADDR_SIZE];
+	u8 hdr_len;
+	u32 partial_hdr_hdl[IPA_IP_MAX];
+	struct list_head link;
+};
+
+struct ipa_wigig_context {
+	struct list_head head_intf_list;
+	struct mutex lock;
+	u32 ipa_pm_hdl;
+	phys_addr_t periph_baddr_pa;
+	phys_addr_t pseudo_cause_pa;
+	phys_addr_t int_gen_tx_pa;
+	phys_addr_t int_gen_rx_pa;
+	phys_addr_t dma_ep_misc_pa;
+	struct ipa_wigig_pipe_setup_info_smmu pipes_smmu[IPA_WIGIG_MAX_PIPES];
+	struct ipa_wigig_rx_pipe_data_buffer_info_smmu rx_buff_smmu;
+	struct ipa_wigig_tx_pipe_data_buffer_info_smmu
+		tx_buff_smmu[IPA_WIGIG_TX_PIPE_NUM];
+};
+
+static struct ipa_wigig_context *ipa_wigig_ctx;
+
+int ipa_wigig_init(struct ipa_wigig_init_in_params *in,
+	struct ipa_wigig_init_out_params *out)
+{
+	struct ipa_wdi_uc_ready_params inout;
+
+	if (!in || !out) {
+		IPA_WIGIG_ERR("invalid params in=%pK, out %pK\n", in, out);
+		return -EINVAL;
+	}
+
+	IPA_WIGIG_DBG("\n");
+	if (ipa_wigig_ctx) {
+		IPA_WIGIG_ERR("ipa_wigig_ctx was initialized before\n");
+		return -EINVAL;
+	}
+
+	ipa_wigig_ctx = kzalloc(sizeof(*ipa_wigig_ctx), GFP_KERNEL);
+	if (ipa_wigig_ctx == NULL)
+		return -ENOMEM;
+
+	mutex_init(&ipa_wigig_ctx->lock);
+	INIT_LIST_HEAD(&ipa_wigig_ctx->head_intf_list);
+
+	ipa_wigig_ctx->pseudo_cause_pa = in->pseudo_cause_pa;
+	ipa_wigig_ctx->int_gen_tx_pa = in->int_gen_tx_pa;
+	ipa_wigig_ctx->int_gen_rx_pa = in->int_gen_rx_pa;
+	ipa_wigig_ctx->dma_ep_misc_pa = in->dma_ep_misc_pa;
+	ipa_wigig_ctx->periph_baddr_pa = in->periph_baddr_pa;
+
+	inout.notify = in->notify;
+	inout.priv = in->priv;
+	if (ipa_wigig_uc_init(&inout, in->int_notify, &out->uc_db_pa)) {
+		kfree(ipa_wigig_ctx);
+		ipa_wigig_ctx = NULL;
+		return -EFAULT;
+	}
+
+	out->is_uc_ready = inout.is_uC_ready;
+
+	IPA_WIGIG_DBG("exit\n");
+
+	return 0;
+}
+EXPORT_SYMBOL(ipa_wigig_init);
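A hypothetical caller sketch for ipa_wigig_init(), using only the in/out
fields this file reads (notify, priv, int_notify, the MSI register physical
addresses, uc_db_pa, is_uc_ready); the my_wigig_dev type and the callback
names are assumptions, not part of the patch:

	static int wigig_ipa_probe(struct my_wigig_dev *wdev)
	{
		struct ipa_wigig_init_in_params in = { 0 };
		struct ipa_wigig_init_out_params out = { 0 };

		/* physical addresses of the 11ad MSI-related registers;
		 * wdev and its fields are hypothetical
		 */
		in.periph_baddr_pa = wdev->bar_pa;
		in.pseudo_cause_pa = wdev->pseudo_cause_pa;
		in.int_gen_tx_pa = wdev->int_gen_tx_pa;
		in.int_gen_rx_pa = wdev->int_gen_rx_pa;
		in.dma_ep_misc_pa = wdev->dma_ep_misc_pa;
		in.notify = my_uc_ready_cb;	/* hypothetical callbacks */
		in.priv = wdev;
		in.int_notify = my_misc_int_cb;

		if (ipa_wigig_init(&in, &out))
			return -ENODEV;

		/* out.uc_db_pa holds the uC doorbell PA; out.is_uc_ready
		 * says whether notify will still fire later
		 */
		wdev->uc_db_pa = out.uc_db_pa;
		return 0;
	}
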
+
+int ipa_wigig_cleanup(void)
+{
+	struct ipa_wigig_intf_info *entry;
+	struct ipa_wigig_intf_info *next;
+
+	IPA_WIGIG_DBG("\n");
+
+	if (!ipa_wigig_ctx)
+		return -ENODEV;
+
+	/* clear interface list */
+	list_for_each_entry_safe(entry, next,
+		&ipa_wigig_ctx->head_intf_list, link) {
+		list_del(&entry->link);
+		kfree(entry);
+	}
+
+	mutex_destroy(&ipa_wigig_ctx->lock);
+	kfree(ipa_wigig_ctx);
+	ipa_wigig_ctx = NULL;
+
+	IPA_WIGIG_DBG("exit\n");
+	return 0;
+}
+EXPORT_SYMBOL(ipa_wigig_cleanup);
+
+bool ipa_wigig_is_smmu_enabled(void)
+{
+	struct ipa_smmu_in_params in;
+	struct ipa_smmu_out_params out;
+
+	IPA_WIGIG_DBG("\n");
+
+	in.smmu_client = IPA_SMMU_WLAN_CLIENT;
+
+	ipa_get_smmu_params(&in, &out);
+
+	IPA_WIGIG_DBG("exit\n");
+
+	return out.smmu_enable;
+}
+EXPORT_SYMBOL(ipa_wigig_is_smmu_enabled);
+
+static int ipa_wigig_commit_partial_hdr(
+	struct ipa_ioc_add_hdr *hdr,
+	const char *netdev_name,
+	struct ipa_wigig_hdr_info *hdr_info)
+{
+	int i;
+
+	IPA_WIGIG_DBG("\n");
+
+	if (!netdev_name) {
+		IPA_WIGIG_ERR("Invalid input\n");
+		return -EINVAL;
+	}
+
+	hdr->commit = 1;
+	hdr->num_hdrs = 2;
+
+	snprintf(hdr->hdr[0].name, sizeof(hdr->hdr[0].name),
+		"%s_ipv4", netdev_name);
+	snprintf(hdr->hdr[1].name, sizeof(hdr->hdr[1].name),
+		"%s_ipv6", netdev_name);
+	for (i = IPA_IP_v4; i < IPA_IP_MAX; i++) {
+		hdr->hdr[i].hdr_len = hdr_info[i].hdr_len;
+		memcpy(hdr->hdr[i].hdr, hdr_info[i].hdr, hdr->hdr[i].hdr_len);
+		hdr->hdr[i].type = hdr_info[i].hdr_type;
+		hdr->hdr[i].is_partial = 1;
+		hdr->hdr[i].is_eth2_ofst_valid = 1;
+		hdr->hdr[i].eth2_ofst = hdr_info[i].dst_mac_addr_offset;
+	}
+
+	if (ipa_add_hdr(hdr)) {
+		IPA_WIGIG_ERR("fail to add partial headers\n");
+		return -EFAULT;
+	}
+
+	IPA_WIGIG_DBG("exit\n");
+
+	return 0;
+}
+
+static void ipa_wigig_free_msg(void *msg, uint32_t len, uint32_t type)
+{
+	IPA_WIGIG_DBG("free msg type:%d, len:%d, buff %pK", type, len, msg);
+	kfree(msg);
+	IPA_WIGIG_DBG("exit\n");
+}
+
+static int ipa_wigig_send_msg(uint8_t msg_type,
+	const char *netdev_name,
+	u8 *netdev_mac)
+{
+	struct ipa_msg_meta msg_meta;
+	struct ipa_wlan_msg *msg;
+	int ret;
+
+	IPA_WIGIG_DBG("\n");
+
+	msg_meta.msg_type = msg_type;
+	msg_meta.msg_len = sizeof(struct ipa_wlan_msg);
+
+	msg = kzalloc(sizeof(*msg), GFP_KERNEL);
+	if (msg == NULL)
+		return -ENOMEM;
+
+	strlcpy(msg->name, netdev_name, IPA_RESOURCE_NAME_MAX);
+	memcpy(msg->mac_addr, netdev_mac, IPA_MAC_ADDR_SIZE);
+
+	IPA_WIGIG_DBG("send msg type:%d, len:%d, buff %pK", msg_meta.msg_type,
+		msg_meta.msg_len, msg);
+	ret = ipa_send_msg(&msg_meta, msg, ipa_wigig_free_msg);
+
+	IPA_WIGIG_DBG("exit\n");
+
+	return ret;
+}
+
+int ipa_wigig_reg_intf(
+	struct ipa_wigig_reg_intf_in_params *in)
+{
+	struct ipa_wigig_intf_info *new_intf;
+	struct ipa_wigig_intf_info *entry;
+	struct ipa_tx_intf tx;
+	struct ipa_rx_intf rx;
+	struct ipa_ioc_tx_intf_prop tx_prop[2];
+	struct ipa_ioc_rx_intf_prop rx_prop[2];
+	struct ipa_ioc_add_hdr *hdr;
+	struct ipa_ioc_del_hdr *del_hdr = NULL;
+	u32 len;
+	int ret = 0;
+
+	IPA_WIGIG_DBG("\n");
+
+	if (in == NULL) {
+		IPA_WIGIG_ERR("invalid params in=%pK\n", in);
+		return -EINVAL;
+	}
+
+	if (!ipa_wigig_ctx) {
+		IPA_WIGIG_ERR("wigig ctx is not initialized\n");
+		return -EPERM;
+	}
+
+	IPA_WIGIG_DBG("register interface for netdev %s\n",
+		in->netdev_name);
+
+	mutex_lock(&ipa_wigig_ctx->lock);
+	list_for_each_entry(entry, &ipa_wigig_ctx->head_intf_list, link)
+		if (strcmp(entry->netdev_name, in->netdev_name) == 0) {
+			IPA_WIGIG_DBG("intf was added before.\n");
+			mutex_unlock(&ipa_wigig_ctx->lock);
+			return 0;
+		}
+
+	IPA_WIGIG_DBG("intf was not added before, proceed.\n");
+	new_intf = kzalloc(sizeof(*new_intf), GFP_KERNEL);
+	if (new_intf == NULL) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	INIT_LIST_HEAD(&new_intf->link);
+	strlcpy(new_intf->netdev_name, in->netdev_name,
+		sizeof(new_intf->netdev_name));
+	new_intf->hdr_len = in->hdr_info[0].hdr_len;
+	memcpy(new_intf->netdev_mac, in->netdev_mac, IPA_MAC_ADDR_SIZE);
+
+	/* add partial header */
+	len = sizeof(struct ipa_ioc_add_hdr) + 2 * sizeof(struct ipa_hdr_add);
+	hdr = kzalloc(len, GFP_KERNEL);
+	if (hdr == NULL) {
+		ret = -ENOMEM;
+		goto fail_alloc_hdr;
+	}
+
+	if (ipa_wigig_commit_partial_hdr(hdr,
+		in->netdev_name,
+		in->hdr_info)) {
+		IPA_WIGIG_ERR("fail to commit partial headers\n");
+		ret = -EFAULT;
+		goto fail_commit_hdr;
+	}
+
+	new_intf->partial_hdr_hdl[IPA_IP_v4] = hdr->hdr[IPA_IP_v4].hdr_hdl;
+	new_intf->partial_hdr_hdl[IPA_IP_v6] = hdr->hdr[IPA_IP_v6].hdr_hdl;
+	IPA_WIGIG_DBG("IPv4 hdr hdl: %d IPv6 hdr hdl: %d\n",
+		hdr->hdr[IPA_IP_v4].hdr_hdl, hdr->hdr[IPA_IP_v6].hdr_hdl);
+
+	/* populate tx prop */
+	tx.num_props = 2;
+	tx.prop = tx_prop;
+
+	memset(tx_prop, 0, sizeof(tx_prop));
+	tx_prop[0].ip = IPA_IP_v4;
+	/*
+	 * for consumers, we register a default pipe, but IPACM will determine
+	 * the actual pipe according to the relevant client MAC
+	 */
+	tx_prop[0].dst_pipe = IPA_CLIENT_WIGIG1_CONS;
+	tx_prop[0].hdr_l2_type = in->hdr_info[0].hdr_type;
+	strlcpy(tx_prop[0].hdr_name, hdr->hdr[IPA_IP_v4].name,
+		sizeof(tx_prop[0].hdr_name));
+
+	tx_prop[1].ip = IPA_IP_v6;
+	tx_prop[1].dst_pipe = IPA_CLIENT_WIGIG1_CONS;
+	tx_prop[1].hdr_l2_type = in->hdr_info[1].hdr_type;
+	strlcpy(tx_prop[1].hdr_name, hdr->hdr[IPA_IP_v6].name,
+		sizeof(tx_prop[1].hdr_name));
+
+	/* populate rx prop */
+	rx.num_props = 2;
+	rx.prop = rx_prop;
+
+	memset(rx_prop, 0, sizeof(rx_prop));
+	rx_prop[0].ip = IPA_IP_v4;
+	rx_prop[0].src_pipe = IPA_CLIENT_WIGIG_PROD;
+	rx_prop[0].hdr_l2_type = in->hdr_info[0].hdr_type;
+
+	rx_prop[1].ip = IPA_IP_v6;
+	rx_prop[1].src_pipe = IPA_CLIENT_WIGIG_PROD;
+	rx_prop[1].hdr_l2_type = in->hdr_info[1].hdr_type;
+
+	if (ipa_register_intf(in->netdev_name, &tx, &rx)) {
+		IPA_WIGIG_ERR("fail to add interface prop\n");
+		ret = -EFAULT;
+		goto fail_register;
+	}
+
+	if (ipa_wigig_send_msg(WLAN_AP_CONNECT,
+		in->netdev_name,
+		in->netdev_mac)) {
+		IPA_WIGIG_ERR("couldn't send msg to IPACM\n");
+		ret = -EFAULT;
+		goto fail_sendmsg;
+	}
+
+	list_add(&new_intf->link, &ipa_wigig_ctx->head_intf_list);
+
+	kfree(hdr);
+	mutex_unlock(&ipa_wigig_ctx->lock);
+
+	IPA_WIGIG_DBG("exit\n");
+	return 0;
+fail_sendmsg:
+	ipa_deregister_intf(in->netdev_name);
+fail_register:
+	del_hdr = kzalloc(sizeof(struct ipa_ioc_del_hdr) +
+		2 * sizeof(struct ipa_hdr_del), GFP_KERNEL);
+	if (del_hdr) {
+		del_hdr->commit = 1;
+		del_hdr->num_hdls = 2;
+		del_hdr->hdl[0].hdl = new_intf->partial_hdr_hdl[IPA_IP_v4];
+		del_hdr->hdl[1].hdl = new_intf->partial_hdr_hdl[IPA_IP_v6];
+		ipa_del_hdr(del_hdr);
+		kfree(del_hdr);
+	}
+	new_intf->partial_hdr_hdl[IPA_IP_v4] = 0;
+	new_intf->partial_hdr_hdl[IPA_IP_v6] = 0;
+fail_commit_hdr:
+	kfree(hdr);
+fail_alloc_hdr:
+	kfree(new_intf);
+fail:
+	mutex_unlock(&ipa_wigig_ctx->lock);
+	return ret;
+}
+EXPORT_SYMBOL(ipa_wigig_reg_intf);
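ipa_wigig_reg_intf() above follows the kernel's goto-unwind convention: each
failure label undoes exactly the steps that succeeded before it, in reverse
order. The pattern in isolation (step_a/step_b/undo_a are hypothetical):

	int ret;

	ret = step_a();
	if (ret)
		goto fail_a;
	ret = step_b();
	if (ret)
		goto fail_b;
	return 0;

fail_b:
	undo_a();	/* unwind only what step_a set up */
fail_a:
	return ret;
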
+
+int ipa_wigig_dereg_intf(const char *netdev_name)
+{
+	int len, ret;
+	struct ipa_ioc_del_hdr *hdr = NULL;
+	struct ipa_wigig_intf_info *entry;
+	struct ipa_wigig_intf_info *next;
+
+	IPA_WIGIG_DBG("\n");
+
+	if (!netdev_name) {
+		IPA_WIGIG_ERR("no netdev name\n");
+		return -EINVAL;
+	}
+
+	if (!ipa_wigig_ctx) {
+		IPA_WIGIG_ERR("wigig ctx is not initialized\n");
+		return -EPERM;
+	}
+
+	mutex_lock(&ipa_wigig_ctx->lock);
+
+	ret = -EFAULT;
+
+	list_for_each_entry_safe(entry, next, &ipa_wigig_ctx->head_intf_list,
+		link)
+		if (strcmp(entry->netdev_name, netdev_name) == 0) {
+			len = sizeof(struct ipa_ioc_del_hdr) +
+				2 * sizeof(struct ipa_hdr_del);
+			hdr = kzalloc(len, GFP_KERNEL);
+			if (hdr == NULL) {
+				mutex_unlock(&ipa_wigig_ctx->lock);
+				return -ENOMEM;
+			}
+
+			hdr->commit = 1;
+			hdr->num_hdls = 2;
+			hdr->hdl[0].hdl = entry->partial_hdr_hdl[0];
+			hdr->hdl[1].hdl = entry->partial_hdr_hdl[1];
+			IPA_WIGIG_DBG("IPv4 hdr hdl: %d IPv6 hdr hdl: %d\n",
+				hdr->hdl[0].hdl, hdr->hdl[1].hdl);
+
+			if (ipa_del_hdr(hdr)) {
+				IPA_WIGIG_ERR(
+					"fail to delete partial header\n");
+				ret = -EFAULT;
+				goto fail;
+			}
+
+			if (ipa_deregister_intf(entry->netdev_name)) {
+				IPA_WIGIG_ERR("fail to del interface props\n");
+				ret = -EFAULT;
+				goto fail;
+			}
+
+			if (ipa_wigig_send_msg(WLAN_AP_DISCONNECT,
+				entry->netdev_name,
+				entry->netdev_mac)) {
+				IPA_WIGIG_ERR("couldn't send msg to IPACM\n");
+				ret = -EFAULT;
+				goto fail;
+			}
+
+			list_del(&entry->link);
+			kfree(entry);
+
+			ret = 0;
+			break;
+		}
+
+	IPA_WIGIG_DBG("exit\n");
+fail:
+	kfree(hdr);
+	mutex_unlock(&ipa_wigig_ctx->lock);
+	return ret;
+}
+EXPORT_SYMBOL(ipa_wigig_dereg_intf);
+
+static void ipa_wigig_pm_cb(void *p, enum ipa_pm_cb_event event)
+{
+	IPA_WIGIG_DBG("received pm event %d\n", event);
+}
+
+int ipa_wigig_conn_rx_pipe(struct ipa_wigig_conn_rx_in_params *in,
+	struct ipa_wigig_conn_out_params *out)
+{
+	int ret;
+	struct ipa_pm_register_params pm_params;
+
+	IPA_WIGIG_DBG("\n");
+
+	if (!in || !out) {
+		IPA_WIGIG_ERR("empty parameters. in=%pK out=%pK\n", in, out);
+		return -EINVAL;
+	}
+
+	if (!ipa_wigig_ctx) {
+		IPA_WIGIG_ERR("wigig ctx is not initialized\n");
+		return -EPERM;
+	}
+
+	if (ipa_wigig_is_smmu_enabled()) {
+		IPA_WIGIG_ERR("IPA SMMU is enabled, wrong API used\n");
+		return -EFAULT;
+	}
+
+	ret = ipa_uc_state_check();
+	if (ret) {
+		IPA_WIGIG_ERR("uC not ready\n");
+		return ret;
+	}
+
+	pm_params.name = "wigig";
+	pm_params.callback = ipa_wigig_pm_cb;
+	pm_params.user_data = NULL;
+	pm_params.group = IPA_PM_GROUP_DEFAULT;
+	if (ipa_pm_register(&pm_params, &ipa_wigig_ctx->ipa_pm_hdl)) {
+		IPA_WIGIG_ERR("fail to register ipa pm\n");
+		ret = -EFAULT;
+		goto fail_pm;
+	}
+	IPA_WIGIG_DBG("pm hdl %d\n", ipa_wigig_ctx->ipa_pm_hdl);
+
+	ret = ipa_wigig_uc_msi_init(true,
+		ipa_wigig_ctx->periph_baddr_pa,
+		ipa_wigig_ctx->pseudo_cause_pa,
+		ipa_wigig_ctx->int_gen_tx_pa,
+		ipa_wigig_ctx->int_gen_rx_pa,
+		ipa_wigig_ctx->dma_ep_misc_pa);
+	if (ret) {
+		IPA_WIGIG_ERR("failed configuring msi regs at uC\n");
+		ret = -EFAULT;
+		goto fail_msi;
+	}
+
+	if (ipa_conn_wigig_rx_pipe_i(in, out)) {
+		IPA_WIGIG_ERR("fail to connect rx pipe\n");
+		ret = -EFAULT;
+		goto fail_connect_pipe;
+	}
+
+	IPA_WIGIG_DBG("exit\n");
+
+	return 0;
+
+fail_connect_pipe:
+	ipa_wigig_uc_msi_init(false,
+		ipa_wigig_ctx->periph_baddr_pa,
+		ipa_wigig_ctx->pseudo_cause_pa,
+		ipa_wigig_ctx->int_gen_tx_pa,
+		ipa_wigig_ctx->int_gen_rx_pa,
+		ipa_wigig_ctx->dma_ep_misc_pa);
+fail_msi:
+	ipa_pm_deregister(ipa_wigig_ctx->ipa_pm_hdl);
+fail_pm:
+	return ret;
+}
+EXPORT_SYMBOL(ipa_wigig_conn_rx_pipe);
+
+static int ipa_wigig_client_to_idx(enum ipa_client_type client,
+	unsigned int *idx)
+{
+	switch (client) {
+	case IPA_CLIENT_WIGIG1_CONS:
+		*idx = 1;
+		break;
+	case IPA_CLIENT_WIGIG2_CONS:
+		*idx = 2;
+		break;
+	case IPA_CLIENT_WIGIG3_CONS:
+		*idx = 3;
+		break;
+	case IPA_CLIENT_WIGIG4_CONS:
+		*idx = 4;
+		break;
+	default:
+		IPA_WIGIG_ERR("invalid client %d\n", client);
+		return -EINVAL;
+	}
+
+	return 0;
+}
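The index convention implied here: entry 0 of pipes_smmu[] is the Rx pipe
(IPA_CLIENT_WIGIG_PROD) and entries 1..4 are the four Tx consumers, while
tx_buff_smmu[] is dense over the Tx pipes only, hence the idx - 1 offsets used
further down. A short illustration (not part of the patch):

	unsigned int idx;

	if (!ipa_wigig_client_to_idx(IPA_CLIENT_WIGIG2_CONS, &idx)) {
		/* idx == 2: pipe state lives in pipes_smmu[2],
		 * Tx buffer state in tx_buff_smmu[1]
		 */
	}
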
+
+static int ipa_wigig_clean_pipe_smmu_info(unsigned int idx)
+{
+	IPA_WIGIG_DBG("cleaning pipe %d info\n", idx);
+
+	if (idx >= IPA_WIGIG_MAX_PIPES) {
+		IPA_WIGIG_ERR("invalid index %d\n", idx);
+		return -EINVAL;
+	}
+	kfree(ipa_wigig_ctx->pipes_smmu[idx].desc_ring_base.sgl);
+	kfree(ipa_wigig_ctx->pipes_smmu[idx].status_ring_base.sgl);
+
+	memset(ipa_wigig_ctx->pipes_smmu + idx,
+		0,
+		sizeof(ipa_wigig_ctx->pipes_smmu[idx]));
+
+	IPA_WIGIG_DBG("exit\n");
+
+	return 0;
+}
+
+static int ipa_wigig_store_pipe_smmu_info
+	(struct ipa_wigig_pipe_setup_info_smmu *pipe_smmu, unsigned int idx)
+{
+	unsigned int nents;
+	struct scatterlist *sgl;
+	int ret;
+
+	IPA_WIGIG_DBG("\n");
+
+	/* store regs */
+	ipa_wigig_ctx->pipes_smmu[idx].desc_ring_HWHEAD_pa =
+		pipe_smmu->desc_ring_HWHEAD_pa;
+	ipa_wigig_ctx->pipes_smmu[idx].desc_ring_HWTAIL_pa =
+		pipe_smmu->desc_ring_HWTAIL_pa;
+
+	ipa_wigig_ctx->pipes_smmu[idx].status_ring_HWHEAD_pa =
+		pipe_smmu->status_ring_HWHEAD_pa;
+	ipa_wigig_ctx->pipes_smmu[idx].status_ring_HWTAIL_pa =
+		pipe_smmu->status_ring_HWTAIL_pa;
+
+	/* store rings IOVAs */
+	ipa_wigig_ctx->pipes_smmu[idx].desc_ring_base_iova =
+		pipe_smmu->desc_ring_base_iova;
+	ipa_wigig_ctx->pipes_smmu[idx].status_ring_base_iova =
+		pipe_smmu->status_ring_base_iova;
+
+	/* copy sgt */
+	nents = pipe_smmu->desc_ring_base.nents;
+	sgl = kmemdup(pipe_smmu->desc_ring_base.sgl,
+		nents * sizeof(struct scatterlist),
+		GFP_KERNEL);
+	if (sgl == NULL) {
+		ret = -ENOMEM;
+		goto fail_desc;
+	}
+	ipa_wigig_ctx->pipes_smmu[idx].desc_ring_base.sgl = sgl;
+	ipa_wigig_ctx->pipes_smmu[idx].desc_ring_base.nents = nents;
+	ipa_wigig_ctx->pipes_smmu[idx].desc_ring_base.orig_nents =
+		pipe_smmu->desc_ring_base.orig_nents;
+
+	nents = pipe_smmu->status_ring_base.nents;
+	sgl = kmemdup(pipe_smmu->status_ring_base.sgl,
+		nents * sizeof(struct scatterlist),
+		GFP_KERNEL);
+	if (sgl == NULL) {
+		ret = -ENOMEM;
+		goto fail_stat;
+	}
+	ipa_wigig_ctx->pipes_smmu[idx].status_ring_base.sgl = sgl;
+	ipa_wigig_ctx->pipes_smmu[idx].status_ring_base.nents = nents;
+	ipa_wigig_ctx->pipes_smmu[idx].status_ring_base.orig_nents =
+		pipe_smmu->status_ring_base.orig_nents;
+
+	IPA_WIGIG_DBG("exit\n");
+
+	return 0;
+fail_stat:
+	kfree(ipa_wigig_ctx->pipes_smmu[idx].desc_ring_base.sgl);
+	memset(&ipa_wigig_ctx->pipes_smmu[idx].desc_ring_base,
+		0, sizeof(ipa_wigig_ctx->pipes_smmu[idx].desc_ring_base));
+fail_desc:
+	return ret;
+}
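Note that kmemdup() above duplicates only the scatterlist array, not the pages
it describes; the stored sg_table is a shallow copy kept so the rings can be
unmapped at disconnect time. The same idea in isolation (a sketch under that
assumption):

	/* shallow-copy an sg_table: clone the sgl array, share the pages */
	static int sgt_shallow_copy(struct sg_table *dst,
		const struct sg_table *src)
	{
		dst->sgl = kmemdup(src->sgl,
			src->nents * sizeof(struct scatterlist), GFP_KERNEL);
		if (!dst->sgl)
			return -ENOMEM;
		dst->nents = src->nents;
		dst->orig_nents = src->orig_nents;
		return 0;
	}
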
+
+static int ipa_wigig_get_pipe_smmu_info(
+	struct ipa_wigig_pipe_setup_info_smmu **pipe_smmu, unsigned int idx)
+{
+	if (idx >= IPA_WIGIG_MAX_PIPES) {
+		IPA_WIGIG_ERR("exceeded pipe num %d > %d\n",
+			idx, IPA_WIGIG_MAX_PIPES);
+		return -EINVAL;
+	}
+
+	*pipe_smmu = &ipa_wigig_ctx->pipes_smmu[idx];
+
+	return 0;
+}
+
+static void ipa_wigig_clean_rx_buff_smmu_info(void)
+{
+	IPA_WIGIG_DBG("clearing rx buff smmu info\n");
+
+	kfree(ipa_wigig_ctx->rx_buff_smmu.data_buffer_base.sgl);
+	memset(&ipa_wigig_ctx->rx_buff_smmu,
+		0,
+		sizeof(ipa_wigig_ctx->rx_buff_smmu));
+
+	IPA_WIGIG_DBG("exit\n");
+}
+
+static int ipa_wigig_store_rx_buff_smmu_info(
+	struct ipa_wigig_rx_pipe_data_buffer_info_smmu *dbuff_smmu)
+{
+	unsigned int nents;
+	struct scatterlist *sgl;
+
+	IPA_WIGIG_DBG("\n");
+
+	nents = dbuff_smmu->data_buffer_base.nents;
+	sgl = kmemdup(dbuff_smmu->data_buffer_base.sgl,
+		nents * sizeof(struct scatterlist),
+		GFP_KERNEL);
+	if (sgl == NULL)
+		return -ENOMEM;
+
+	ipa_wigig_ctx->rx_buff_smmu.data_buffer_base.sgl = sgl;
+	ipa_wigig_ctx->rx_buff_smmu.data_buffer_base.nents = nents;
+	ipa_wigig_ctx->rx_buff_smmu.data_buffer_base.orig_nents =
+		dbuff_smmu->data_buffer_base.orig_nents;
+	ipa_wigig_ctx->rx_buff_smmu.data_buffer_base_iova =
+		dbuff_smmu->data_buffer_base_iova;
+	ipa_wigig_ctx->rx_buff_smmu.data_buffer_size =
+		dbuff_smmu->data_buffer_size;
+
+	IPA_WIGIG_DBG("exit\n");
+
+	return 0;
+}
+
+static int ipa_wigig_get_rx_buff_smmu_info(
+	struct ipa_wigig_rx_pipe_data_buffer_info_smmu **dbuff_smmu)
+{
+	IPA_WIGIG_DBG("\n");
+
+	*dbuff_smmu = &ipa_wigig_ctx->rx_buff_smmu;
+
+	IPA_WIGIG_DBG("exit\n");
+
+	return 0;
+}
+
+static int ipa_wigig_store_tx_buff_smmu_info(
+	struct ipa_wigig_tx_pipe_data_buffer_info_smmu *dbuff_smmu,
+	unsigned int idx)
+{
+	unsigned int nents;
+	struct scatterlist *sgl;
+	int result, i;
+	struct ipa_wigig_tx_pipe_data_buffer_info_smmu *tx_buff_smmu;
+
+	IPA_WIGIG_DBG("\n");
+
+	if (idx > (IPA_WIGIG_TX_PIPE_NUM - 1)) {
+		IPA_WIGIG_ERR("invalid tx index %d\n", idx);
+		return -EINVAL;
+	}
+
+	tx_buff_smmu = ipa_wigig_ctx->tx_buff_smmu + idx;
+
+	tx_buff_smmu->data_buffer_base =
+		kcalloc(dbuff_smmu->num_buffers,
+			sizeof(struct sg_table),
+			GFP_KERNEL);
+	if (!tx_buff_smmu->data_buffer_base)
+		return -ENOMEM;
+
+	tx_buff_smmu->data_buffer_base_iova =
+		kcalloc(dbuff_smmu->num_buffers, sizeof(u64), GFP_KERNEL);
+	if (!tx_buff_smmu->data_buffer_base_iova) {
+		result = -ENOMEM;
+		goto fail_iova;
+	}
+
+	for (i = 0; i < dbuff_smmu->num_buffers; i++) {
+		nents = dbuff_smmu->data_buffer_base[i].nents;
+		sgl = kmemdup(dbuff_smmu->data_buffer_base[i].sgl,
+			nents * sizeof(struct scatterlist),
+			GFP_KERNEL);
+		if (sgl == NULL) {
+			result = -ENOMEM;
+			goto fail_sgl;
+		}
+
+		tx_buff_smmu->data_buffer_base[i].sgl =
+			sgl;
+		tx_buff_smmu->data_buffer_base[i].nents =
+			nents;
+		tx_buff_smmu->data_buffer_base[i].orig_nents =
+			dbuff_smmu->data_buffer_base[i].orig_nents;
+		tx_buff_smmu->data_buffer_base_iova[i] =
+			dbuff_smmu->data_buffer_base_iova[i];
+	}
+	tx_buff_smmu->num_buffers = dbuff_smmu->num_buffers;
+	tx_buff_smmu->data_buffer_size =
+		dbuff_smmu->data_buffer_size;
+
+	IPA_WIGIG_DBG("exit\n");
+
+	return 0;
+
+fail_sgl:
+	i--;
+	for (; i >= 0; i--)
+		kfree(tx_buff_smmu->data_buffer_base[i].sgl);
+	kfree(tx_buff_smmu->data_buffer_base_iova);
+	tx_buff_smmu->data_buffer_base_iova = NULL;
+fail_iova:
+	kfree(tx_buff_smmu->data_buffer_base);
+	tx_buff_smmu->data_buffer_base = NULL;
+	return result;
+}
+
+static int ipa_wigig_clean_tx_buff_smmu_info(unsigned int idx)
+{
+	unsigned int i;
+	struct ipa_wigig_tx_pipe_data_buffer_info_smmu *dbuff_smmu;
+
+	IPA_WIGIG_DBG("\n");
+
+	if (idx > (IPA_WIGIG_TX_PIPE_NUM - 1)) {
+		IPA_WIGIG_ERR("invalid tx index %d\n", idx);
+		return -EINVAL;
+	}
+
+	dbuff_smmu = &ipa_wigig_ctx->tx_buff_smmu[idx];
+
+	if (!dbuff_smmu->data_buffer_base) {
+		IPA_WIGIG_ERR("no pa has been allocated\n");
+		return -EFAULT;
+	}
+
+	for (i = 0; i < dbuff_smmu->num_buffers; i++)
+		kfree(dbuff_smmu->data_buffer_base[i].sgl);
+
+	kfree(dbuff_smmu->data_buffer_base);
+	dbuff_smmu->data_buffer_base = NULL;
+
+	kfree(dbuff_smmu->data_buffer_base_iova);
+	dbuff_smmu->data_buffer_base_iova = NULL;
+
+	dbuff_smmu->data_buffer_size = 0;
+	dbuff_smmu->num_buffers = 0;
+
+	IPA_WIGIG_DBG("exit\n");
+
+	return 0;
+}
+
+static int ipa_wigig_get_tx_buff_smmu_info(
+struct ipa_wigig_tx_pipe_data_buffer_info_smmu **dbuff_smmu,
+	unsigned int idx)
+{
+	if (idx > (IPA_WIGIG_TX_PIPE_NUM - 1)) {
+		IPA_WIGIG_ERR("invalid tx index %d\n", idx);
+		return -EINVAL;
+	}
+
+	*dbuff_smmu = &ipa_wigig_ctx->tx_buff_smmu[idx];
+
+	return 0;
+}
+
+static int ipa_wigig_store_rx_smmu_info
+	(struct ipa_wigig_conn_rx_in_params_smmu *in)
+{
+	int ret;
+
+	IPA_WIGIG_DBG("\n");
+
+	ret = ipa_wigig_store_pipe_smmu_info(&in->pipe_smmu,
+		IPA_WIGIG_RX_PIPE_IDX);
+	if (ret)
+		return ret;
+
+	ret = ipa_wigig_store_rx_buff_smmu_info(&in->dbuff_smmu);
+	if (ret)
+		goto fail_buff;
+
+	IPA_WIGIG_DBG("exit\n");
+
+	return 0;
+
+fail_buff:
+	ipa_wigig_clean_pipe_smmu_info(IPA_WIGIG_RX_PIPE_IDX);
+	return ret;
+}
+
+static int ipa_wigig_store_client_smmu_info
+(struct ipa_wigig_conn_tx_in_params_smmu *in, enum ipa_client_type client)
+{
+	int ret;
+	unsigned int idx;
+
+	IPA_WIGIG_DBG("\n");
+
+	ret = ipa_wigig_client_to_idx(client, &idx);
+	if (ret)
+		return ret;
+
+	ret = ipa_wigig_store_pipe_smmu_info(&in->pipe_smmu, idx);
+	if (ret)
+		return ret;
+
+	ret = ipa_wigig_store_tx_buff_smmu_info(&in->dbuff_smmu, idx - 1);
+	if (ret)
+		goto fail_buff;
+
+	IPA_WIGIG_DBG("exit\n");
+
+	return 0;
+
+fail_buff:
+	ipa_wigig_clean_pipe_smmu_info(idx);
+	return ret;
+}
+
+static int ipa_wigig_get_rx_smmu_info(
+	struct ipa_wigig_pipe_setup_info_smmu **pipe_smmu,
+	struct ipa_wigig_rx_pipe_data_buffer_info_smmu **dbuff_smmu)
+{
+	int ret;
+
+	ret = ipa_wigig_get_pipe_smmu_info(pipe_smmu, IPA_WIGIG_RX_PIPE_IDX);
+	if (ret)
+		return ret;
+
+	ret = ipa_wigig_get_rx_buff_smmu_info(dbuff_smmu);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int ipa_wigig_get_tx_smmu_info(
+	struct ipa_wigig_pipe_setup_info_smmu **pipe_smmu,
+	struct ipa_wigig_tx_pipe_data_buffer_info_smmu **dbuff_smmu,
+	enum ipa_client_type client)
+{
+	unsigned int idx;
+	int ret;
+
+	ret = ipa_wigig_client_to_idx(client, &idx);
+	if (ret)
+		return ret;
+
+	ret = ipa_wigig_get_pipe_smmu_info(pipe_smmu, idx);
+	if (ret)
+		return ret;
+
+	ret = ipa_wigig_get_tx_buff_smmu_info(dbuff_smmu, idx - 1);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int ipa_wigig_clean_smmu_info(enum ipa_client_type client)
+{
+	int ret;
+
+	if (client == IPA_CLIENT_WIGIG_PROD) {
+		ret = ipa_wigig_clean_pipe_smmu_info(IPA_WIGIG_RX_PIPE_IDX);
+		if (ret)
+			return ret;
+		ipa_wigig_clean_rx_buff_smmu_info();
+	} else {
+		unsigned int idx;
+
+		ret = ipa_wigig_client_to_idx(client, &idx);
+		if (ret)
+			return ret;
+
+		ret = ipa_wigig_clean_pipe_smmu_info(idx);
+		if (ret)
+			return ret;
+
+		ret = ipa_wigig_clean_tx_buff_smmu_info(idx - 1);
+		if (ret) {
+			IPA_WIGIG_ERR(
+				"cleaned tx pipe info but wasn't able to clean buff info, client %d\n"
+			, client);
+			WARN_ON(1);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+int ipa_wigig_conn_rx_pipe_smmu(
+	struct ipa_wigig_conn_rx_in_params_smmu *in,
+	struct ipa_wigig_conn_out_params *out)
+{
+	int ret;
+	struct ipa_pm_register_params pm_params;
+
+	IPA_WIGIG_DBG("\n");
+
+	if (!in || !out) {
+		IPA_WIGIG_ERR("empty parameters. in=%pK out=%pK\n", in, out);
+		return -EINVAL;
+	}
+
+	if (!ipa_wigig_ctx) {
+		IPA_WIGIG_ERR("wigig ctx is not initialized\n");
+		return -EPERM;
+	}
+
+	if (!ipa_wigig_is_smmu_enabled()) {
+		IPA_WIGIG_ERR("IPA SMMU is disabled, wrong API used\n");
+		return -EFAULT;
+	}
+
+	ret = ipa_uc_state_check();
+	if (ret) {
+		IPA_WIGIG_ERR("uC not ready\n");
+		return ret;
+	}
+
+	pm_params.name = "wigig";
+	pm_params.callback = ipa_wigig_pm_cb;
+	pm_params.user_data = NULL;
+	pm_params.group = IPA_PM_GROUP_DEFAULT;
+	if (ipa_pm_register(&pm_params, &ipa_wigig_ctx->ipa_pm_hdl)) {
+		IPA_WIGIG_ERR("fail to register ipa pm\n");
+		ret = -EFAULT;
+		goto fail_pm;
+	}
+
+	ret = ipa_wigig_uc_msi_init(true,
+		ipa_wigig_ctx->periph_baddr_pa,
+		ipa_wigig_ctx->pseudo_cause_pa,
+		ipa_wigig_ctx->int_gen_tx_pa,
+		ipa_wigig_ctx->int_gen_rx_pa,
+		ipa_wigig_ctx->dma_ep_misc_pa);
+	if (ret) {
+		IPA_WIGIG_ERR("failed configuring msi regs at uC\n");
+		ret = -EFAULT;
+		goto fail_msi;
+	}
+
+	if (ipa_conn_wigig_rx_pipe_i(in, out)) {
+		IPA_WIGIG_ERR("fail to connect rx pipe\n");
+		ret = -EFAULT;
+		goto fail_connect_pipe;
+	}
+
+	if (ipa_wigig_store_rx_smmu_info(in)) {
+		IPA_WIGIG_ERR("fail to store smmu data for rx pipe\n");
+		ret = -EFAULT;
+		goto fail_smmu_store;
+	}
+
+	IPA_WIGIG_DBG("exit\n");
+
+	return 0;
+
+fail_smmu_store:
+	ipa_disconn_wigig_pipe_i(IPA_CLIENT_WIGIG_PROD,
+		&in->pipe_smmu,
+		&in->dbuff_smmu);
+fail_connect_pipe:
+	ipa_wigig_uc_msi_init(false,
+		ipa_wigig_ctx->periph_baddr_pa,
+		ipa_wigig_ctx->pseudo_cause_pa,
+		ipa_wigig_ctx->int_gen_tx_pa,
+		ipa_wigig_ctx->int_gen_rx_pa,
+		ipa_wigig_ctx->dma_ep_misc_pa);
+fail_msi:
+	ipa_pm_deregister(ipa_wigig_ctx->ipa_pm_hdl);
+fail_pm:
+	return ret;
+}
+EXPORT_SYMBOL(ipa_wigig_conn_rx_pipe_smmu);
+
+int ipa_wigig_set_perf_profile(u32 max_supported_bw_mbps)
+{
+	IPA_WIGIG_DBG("setting throughput to %d\n", max_supported_bw_mbps);
+
+	if (!ipa_wigig_ctx) {
+		IPA_WIGIG_ERR("wigig ctx is not initialized\n");
+		return -EPERM;
+	}
+
+	IPA_WIGIG_DBG("ipa_pm handle %d\n", ipa_wigig_ctx->ipa_pm_hdl);
+	if (ipa_pm_set_throughput(ipa_wigig_ctx->ipa_pm_hdl,
+		max_supported_bw_mbps)) {
+		IPA_WIGIG_ERR("fail to setup pm perf profile\n");
+		return -EFAULT;
+	}
+	IPA_WIGIG_DBG("exit\n");
+
+	return 0;
+}
+EXPORT_SYMBOL(ipa_wigig_set_perf_profile);
+
+int ipa_wigig_conn_client(struct ipa_wigig_conn_tx_in_params *in,
+	struct ipa_wigig_conn_out_params *out)
+{
+	IPA_WIGIG_DBG("\n");
+
+	if (!in || !out) {
+		IPA_WIGIG_ERR("empty parameters. in=%pK out=%pK\n", in, out);
+		return -EINVAL;
+	}
+
+	if (!ipa_wigig_ctx) {
+		IPA_WIGIG_ERR("wigig ctx is not initialized\n");
+		return -EPERM;
+	}
+
+	if (ipa_wigig_is_smmu_enabled()) {
+		IPA_WIGIG_ERR("IPA SMMU is enabled, wrong API used\n");
+		return -EFAULT;
+	}
+
+	if (ipa_uc_state_check()) {
+		IPA_WIGIG_ERR("uC not ready\n");
+		return -EFAULT;
+	}
+
+	if (ipa_conn_wigig_client_i(in, out)) {
+		IPA_WIGIG_ERR(
+			"fail to connect client. MAC [%X][%X][%X][%X][%X][%X]\n"
+		, in->client_mac[0], in->client_mac[1], in->client_mac[2]
+		, in->client_mac[3], in->client_mac[4], in->client_mac[5]);
+		return -EFAULT;
+	}
+
+	IPA_WIGIG_DBG("exit\n");
+	return 0;
+}
+EXPORT_SYMBOL(ipa_wigig_conn_client);
+
+int ipa_wigig_conn_client_smmu(
+	struct ipa_wigig_conn_tx_in_params_smmu *in,
+	struct ipa_wigig_conn_out_params *out)
+{
+	int ret;
+
+	IPA_WIGIG_DBG("\n");
+
+	if (!in || !out) {
+		IPA_WIGIG_ERR("empty parameters. in=%pK out=%pK\n", in, out);
+		return -EINVAL;
+	}
+
+	if (!ipa_wigig_ctx) {
+		IPA_WIGIG_ERR("wigig ctx is not initialized\n");
+		return -EPERM;
+	}
+
+	if (!ipa_wigig_is_smmu_enabled()) {
+		IPA_WIGIG_ERR("IPA SMMU is disabled, wrong API used\n");
+		return -EFAULT;
+	}
+
+	ret = ipa_uc_state_check();
+	if (ret) {
+		IPA_WIGIG_ERR("uC not ready\n");
+		return ret;
+	}
+
+	if (ipa_conn_wigig_client_i(in, out)) {
+		IPA_WIGIG_ERR(
+			"fail to connect client. MAC [%X][%X][%X][%X][%X][%X]\n"
+			, in->client_mac[0], in->client_mac[1]
+			, in->client_mac[2], in->client_mac[3]
+			, in->client_mac[4], in->client_mac[5]);
+		return -EFAULT;
+	}
+
+	ret = ipa_wigig_store_client_smmu_info(in, out->client);
+	if (ret)
+		goto fail_smmu;
+
+	IPA_WIGIG_DBG("exit\n");
+	return 0;
+
+fail_smmu:
+	ipa_disconn_wigig_pipe_i(out->client, &in->pipe_smmu, &in->dbuff_smmu);
+	return ret;
+}
+EXPORT_SYMBOL(ipa_wigig_conn_client_smmu);
+
+static inline int ipa_wigig_validate_client_type(enum ipa_client_type client)
+{
+	switch (client) {
+	case IPA_CLIENT_WIGIG_PROD:
+	case IPA_CLIENT_WIGIG1_CONS:
+	case IPA_CLIENT_WIGIG2_CONS:
+	case IPA_CLIENT_WIGIG3_CONS:
+	case IPA_CLIENT_WIGIG4_CONS:
+		break;
+	default:
+		IPA_WIGIG_ERR("invalid client type %d\n", client);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+int ipa_wigig_disconn_pipe(enum ipa_client_type client)
+{
+	int ret;
+
+	IPA_WIGIG_DBG("\n");
+
+	ret = ipa_wigig_validate_client_type(client);
+	if (ret)
+		return ret;
+
+	IPA_WIGIG_DBG("disconnecting ipa_client_type %d\n", client);
+
+	if (ipa_wigig_is_smmu_enabled()) {
+		struct ipa_wigig_pipe_setup_info_smmu *pipe_smmu;
+		struct ipa_wigig_rx_pipe_data_buffer_info_smmu *rx_dbuff_smmu;
+		struct ipa_wigig_tx_pipe_data_buffer_info_smmu *tx_dbuff_smmu;
+
+		if (client == IPA_CLIENT_WIGIG_PROD) {
+			ret = ipa_wigig_get_rx_smmu_info(&pipe_smmu,
+				&rx_dbuff_smmu);
+			if (ret)
+				return ret;
+
+			ret = ipa_disconn_wigig_pipe_i(client,
+				pipe_smmu,
+				rx_dbuff_smmu);
+		} else {
+			ret = ipa_wigig_get_tx_smmu_info(&pipe_smmu,
+				&tx_dbuff_smmu, client);
+			if (ret)
+				return ret;
+
+			ret = ipa_disconn_wigig_pipe_i(client,
+				pipe_smmu,
+				tx_dbuff_smmu);
+		}
+
+	} else {
+		ret = ipa_disconn_wigig_pipe_i(client, NULL, NULL);
+	}
+
+	if (ret) {
+		IPA_WIGIG_ERR("couldn't disconnect client %d\n", client);
+		return ret;
+	}
+
+	/* RX will be disconnected last, deinit uC msi config */
+	if (client == IPA_CLIENT_WIGIG_PROD) {
+		ret = ipa_wigig_uc_msi_init(false,
+			ipa_wigig_ctx->periph_baddr_pa,
+			ipa_wigig_ctx->pseudo_cause_pa,
+			ipa_wigig_ctx->int_gen_tx_pa,
+			ipa_wigig_ctx->int_gen_rx_pa,
+			ipa_wigig_ctx->dma_ep_misc_pa);
+		if (ret) {
+			IPA_WIGIG_ERR("failed unmapping msi regs\n");
+			WARN_ON(1);
+		}
+	}
+	if (ipa_wigig_is_smmu_enabled())
+		ipa_wigig_clean_smmu_info(client);
+
+	IPA_WIGIG_DBG("exit\n");
+	return 0;
+}
+EXPORT_SYMBOL(ipa_wigig_disconn_pipe);
+
+int ipa_wigig_enable_pipe(enum ipa_client_type client)
+{
+	int ret;
+
+	IPA_WIGIG_DBG("\n");
+
+	ret = ipa_wigig_validate_client_type(client);
+	if (ret)
+		return ret;
+
+	IPA_WIGIG_DBG("enabling pipe %d\n", client);
+
+	ret = ipa_enable_wigig_pipe_i(client);
+	if (ret)
+		return ret;
+
+	/* activate IPA PM only when the Rx pipe is enabled */
+	if (client == IPA_CLIENT_WIGIG_PROD) {
+		ret = ipa_pm_activate_sync(ipa_wigig_ctx->ipa_pm_hdl);
+		if (ret) {
+			IPA_WIGIG_ERR("fail to activate ipa pm\n");
+			ret = -EFAULT;
+			goto fail_pm_active;
+		}
+	}
+
+	IPA_WIGIG_DBG("exit\n");
+	return 0;
+
+fail_pm_active:
+	ipa_disable_wigig_pipe_i(client);
+	return ret;
+}
+EXPORT_SYMBOL(ipa_wigig_enable_pipe);
+
+int ipa_wigig_disable_pipe(enum ipa_client_type client)
+{
+	int ret;
+
+	IPA_WIGIG_DBG("\n");
+
+	ret = ipa_wigig_validate_client_type(client);
+	if (ret)
+		return ret;
+
+	ret = ipa_disable_wigig_pipe_i(client);
+	if (ret)
+		return ret;
+
+	/* deactivate IPA PM only when the Rx pipe is disabled */
+	if (client == IPA_CLIENT_WIGIG_PROD) {
+		ret = ipa_pm_deactivate_sync(ipa_wigig_ctx->ipa_pm_hdl);
+		if (ret) {
+			IPA_WIGIG_ERR("fail to deactivate ipa pm\n");
+			return -EFAULT;
+		}
+	}
+
+	IPA_WIGIG_DBG("exit\n");
+	return 0;
+}
+EXPORT_SYMBOL(ipa_wigig_disable_pipe);
+
+int ipa_wigig_tx_dp(enum ipa_client_type dst, struct sk_buff *skb)
+{
+	int ret;
+
+	IPA_WIGIG_DBG_LOW("\n");
+
+	ret = ipa_wigig_validate_client_type(dst);
+	if (unlikely(ret))
+		return ret;
+
+	ret = ipa_tx_dp(dst, skb, NULL);
+	if (unlikely(ret))
+		return ret;
+
+	IPA_WIGIG_DBG_LOW("exit\n");
+	return 0;
+}
+EXPORT_SYMBOL(ipa_wigig_tx_dp);
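A hypothetical transmit-path use of ipa_wigig_tx_dp(); as the comment in
ipa_wigig_reg_intf() notes, IPACM resolves the real per-client pipe, so the
consumer chosen here is illustrative only:

	static netdev_tx_t my_start_xmit(struct sk_buff *skb,
		struct net_device *ndev)
	{
		/* hand the skb to IPA toward a WIGIG consumer pipe */
		if (ipa_wigig_tx_dp(IPA_CLIENT_WIGIG1_CONS, skb))
			return NETDEV_TX_BUSY;	/* let the stack retry */

		return NETDEV_TX_OK;
	}
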
diff --git a/drivers/platform/msm/ipa/ipa_common_i.h b/drivers/platform/msm/ipa/ipa_common_i.h
index 6f24fda..d897bfa 100644
--- a/drivers/platform/msm/ipa/ipa_common_i.h
+++ b/drivers/platform/msm/ipa/ipa_common_i.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 /*
- * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/ipa_mhi.h>
@@ -13,6 +13,7 @@
 #include <linux/ipa.h>
 #include <linux/ipa_uc_offload.h>
 #include <linux/ipa_wdi3.h>
+#include <linux/ipa_wigig.h>
 #include <linux/ratelimit.h>
 
 #define WARNON_RATELIMIT_BURST 1
@@ -436,4 +437,29 @@ int ipa_smmu_free_sgt(struct sg_table **out_sgt_ptr);
 int ipa_ut_module_init(void);
 void ipa_ut_module_exit(void);
 
+int ipa_wigig_uc_init(
+	struct ipa_wdi_uc_ready_params *inout,
+	ipa_wigig_misc_int_cb int_notify,
+	phys_addr_t *uc_db_pa);
+
+int ipa_conn_wigig_rx_pipe_i(void *in, struct ipa_wigig_conn_out_params *out);
+
+int ipa_conn_wigig_client_i(void *in, struct ipa_wigig_conn_out_params *out);
+
+int ipa_wigig_uc_msi_init(
+	bool init,
+	phys_addr_t periph_baddr_pa,
+	phys_addr_t pseudo_cause_pa,
+	phys_addr_t int_gen_tx_pa,
+	phys_addr_t int_gen_rx_pa,
+	phys_addr_t dma_ep_misc_pa);
+
+int ipa_disconn_wigig_pipe_i(enum ipa_client_type client,
+	struct ipa_wigig_pipe_setup_info_smmu *pipe_smmu,
+	void *dbuff);
+
+int ipa_enable_wigig_pipe_i(enum ipa_client_type client);
+
+int ipa_disable_wigig_pipe_i(enum ipa_client_type client);
+
 #endif /* _IPA_COMMON_I_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_v3/Makefile b/drivers/platform/msm/ipa/ipa_v3/Makefile
index 80d24cd..d1d462b 100644
--- a/drivers/platform/msm/ipa/ipa_v3/Makefile
+++ b/drivers/platform/msm/ipa/ipa_v3/Makefile
@@ -6,7 +6,7 @@
 ipat-y := ipa.o ipa_debugfs.o ipa_hdr.o ipa_flt.o ipa_rt.o ipa_dp.o ipa_client.o \
 	ipa_utils.o ipa_nat.o ipa_intf.o teth_bridge.o ipa_interrupts.o \
 	ipa_uc.o ipa_uc_wdi.o ipa_dma.o ipa_uc_mhi.o ipa_mhi.o ipa_uc_ntn.o \
-	ipa_hw_stats.o ipa_pm.o ipa_wdi3_i.o ipa_odl.o
+	ipa_hw_stats.o ipa_pm.o ipa_wdi3_i.o ipa_odl.o ipa_wigig_i.o
 
 ipat-$(CONFIG_IPA_EMULATION) += ipa_dt_replacement.o
 
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index af7cd56..2598187 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -4770,6 +4770,12 @@ static int ipa3_post_init(const struct ipa3_plat_drv_res *resource_p,
 	else
 		IPADBG(":wdi init ok\n");
 
+	result = ipa3_wigig_init_i();
+	if (result)
+		IPAERR(":wigig init failed (%d)\n", -result);
+	else
+		IPADBG(":wigig init ok\n");
+
 	result = ipa3_ntn_init();
 	if (result)
 		IPAERR(":ntn init failed (%d)\n", -result);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
index a514a19..4f3e236 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
@@ -48,8 +48,6 @@
 /* less 1 nominal MTU (1500 bytes) rounded to units of KB */
 #define IPA_ADJUST_AGGR_BYTE_LIMIT(X) (((X) - IPA_MTU)/1000)
 
-#define IPA_ADJUST_AGGR_BYTE_HARD_LIMIT(X) (X/1000)
-
 #define IPA_RX_BUFF_CLIENT_HEADROOM 256
 
 #define IPA_WLAN_RX_POOL_SZ 100
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
index 154875b..2162ceb 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
@@ -247,6 +247,8 @@ enum {
 #define IPA_AGGR_STR_IN_BYTES(str) \
 	(strnlen((str), IPA_AGGR_MAX_STR_LENGTH - 1) + 1)
 
+#define IPA_ADJUST_AGGR_BYTE_HARD_LIMIT(X) ((X)/1000)
+
 #define IPA_TRANSPORT_PROD_TIMEOUT_MSEC 100
 
 #define IPA3_ACTIVE_CLIENTS_TABLE_BUF_SIZE 2048
@@ -1084,6 +1086,7 @@ struct ipa3_nat_ipv6ct_common_mem {
  * @index_table_expansion_addr: index expansion table address
  * @public_ip_addr: ip address of nat table
  * @pdn_mem: pdn config table SW cache memory structure
+ * @is_tmp_mem_allocated: indicates whether tmp mem has been allocated
  */
 struct ipa3_nat_mem {
 	struct ipa3_nat_ipv6ct_common_mem dev;
@@ -1091,6 +1094,7 @@ struct ipa3_nat_mem {
 	char *index_table_expansion_addr;
 	u32 public_ip_addr;
 	struct ipa_mem_buffer pdn_mem;
+	bool is_tmp_mem_allocated;
 };
 
 /**
@@ -1278,6 +1282,18 @@ struct ipa3_uc_wdi_ctx {
 };
 
 /**
+ * struct ipa3_uc_wigig_ctx
+ * @priv: wigig driver private data
+ * @uc_ready_cb: wigig driver uc ready callback
+ * @misc_notify_cb: wigig driver misc interrupt callback
+ */
+struct ipa3_uc_wigig_ctx {
+	void *priv;
+	ipa_uc_ready_cb uc_ready_cb;
+	ipa_wigig_misc_int_cb misc_notify_cb;
+};
+
+/**
  * struct ipa3_wdi2_ctx - IPA wdi2 context
  */
 struct ipa3_wdi2_ctx {
@@ -1490,6 +1506,7 @@ struct ipa3_char_device_context {
  * @wcstats: wlan common buffer stats
  * @uc_ctx: uC interface context
  * @uc_wdi_ctx: WDI specific fields for uC interface
+ * @uc_wigig_ctx: WIGIG specific fields for uC interface
  * @ipa_num_pipes: The number of pipes used by IPA HW
  * @skip_uc_pipe_reset: Indicates whether pipe reset via uC needs to be avoided
  * @ipa_client_apps_wan_cons_agg_gro: RMNET_IOCTL_INGRESS_FORMAT_AGG_DATA
@@ -1613,6 +1630,7 @@ struct ipa3_context {
 
 	struct ipa3_uc_wdi_ctx uc_wdi_ctx;
 	struct ipa3_uc_ntn_ctx uc_ntn_ctx;
+	struct ipa3_uc_wigig_ctx uc_wigig_ctx;
 	u32 wan_rx_ring_size;
 	u32 lan_rx_ring_size;
 	bool skip_uc_pipe_reset;
@@ -2210,6 +2228,26 @@ int ipa3_disconn_wdi3_pipes(int ipa_ep_idx_tx, int ipa_ep_idx_rx);
 int ipa3_enable_wdi3_pipes(int ipa_ep_idx_tx, int ipa_ep_idx_rx);
 int ipa3_disable_wdi3_pipes(int ipa_ep_idx_tx, int ipa_ep_idx_rx);
 
+int ipa3_conn_wigig_rx_pipe_i(void *in,
+	struct ipa_wigig_conn_out_params *out);
+
+int ipa3_conn_wigig_client_i(void *in, struct ipa_wigig_conn_out_params *out);
+
+int ipa3_wigig_uc_msi_init(bool init,
+	phys_addr_t periph_baddr_pa,
+	phys_addr_t pseudo_cause_pa,
+	phys_addr_t int_gen_tx_pa,
+	phys_addr_t int_gen_rx_pa,
+	phys_addr_t dma_ep_misc_pa);
+
+int ipa3_disconn_wigig_pipe_i(enum ipa_client_type client,
+	struct ipa_wigig_pipe_setup_info_smmu *pipe_smmu,
+	void *dbuff);
+
+int ipa3_enable_wigig_pipe_i(enum ipa_client_type client);
+
+int ipa3_disable_wigig_pipe_i(enum ipa_client_type client);
+
 /*
  * To retrieve doorbell physical address of
  * wlan pipes
@@ -2516,6 +2554,12 @@ void ipa3_tag_destroy_imm(void *user1, int user2);
 const struct ipa_gsi_ep_config *ipa3_get_gsi_ep_info
 	(enum ipa_client_type client);
 
+int ipa3_wigig_init_i(void);
+int ipa3_wigig_uc_init(
+	struct ipa_wdi_uc_ready_params *inout,
+	ipa_wigig_misc_int_cb int_notify,
+	phys_addr_t *uc_db_pa);
+
 /* Hardware stats */
 
 #define IPA_STATS_MAX_PIPE_BIT 32
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c b/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c
index 378bf92..774c9af 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/device.h>
@@ -272,14 +272,21 @@ static void ipa3_nat_ipv6ct_destroy_device(
 
 	mutex_lock(&dev->lock);
 
-	dma_free_coherent(ipa3_ctx->pdev, IPA_NAT_IPV6CT_TEMP_MEM_SIZE,
-		dev->tmp_mem->vaddr, dev->tmp_mem->dma_handle);
-	kfree(dev->tmp_mem);
+	if (dev->tmp_mem != NULL &&
+		!(ipa3_ctx->nat_mem.is_tmp_mem_allocated)) {
+		dev->tmp_mem = NULL;
+	} else if (dev->tmp_mem != NULL &&
+		ipa3_ctx->nat_mem.is_tmp_mem_allocated) {
+		dma_free_coherent(ipa3_ctx->pdev, IPA_NAT_IPV6CT_TEMP_MEM_SIZE,
+			dev->tmp_mem->vaddr, dev->tmp_mem->dma_handle);
+		kfree(dev->tmp_mem);
+		dev->tmp_mem = NULL;
+		ipa3_ctx->nat_mem.is_tmp_mem_allocated = false;
+	}
 	device_destroy(dev->class, dev->dev_num);
 	unregister_chrdev_region(dev->dev_num, 1);
 	class_destroy(dev->class);
 	dev->is_dev_init = false;
-
 	mutex_unlock(&dev->lock);
 
 	IPADBG("return\n");
@@ -302,10 +309,15 @@ int ipa3_nat_ipv6ct_init_devices(void)
 	/*
 	 * Allocate NAT/IPv6CT temporary memory. The memory is never deleted,
 	 * because provided to HW once NAT or IPv6CT table is deleted.
-	 * NULL is a legal value
 	 */
 	tmp_mem = ipa3_nat_ipv6ct_allocate_tmp_memory();
 
+	if (tmp_mem == NULL) {
+		IPAERR("unable to allocate tmp_mem\n");
+		return -ENOMEM;
+	}
+	ipa3_ctx->nat_mem.is_tmp_mem_allocated = true;
+
 	if (ipa3_nat_ipv6ct_init_device(
 		&ipa3_ctx->nat_mem.dev,
 		IPA_NAT_DEV_NAME,
@@ -334,10 +346,11 @@ int ipa3_nat_ipv6ct_init_devices(void)
 fail_init_ipv6ct_dev:
 	ipa3_nat_ipv6ct_destroy_device(&ipa3_ctx->nat_mem.dev);
 fail_init_nat_dev:
-	if (tmp_mem != NULL) {
+	if (tmp_mem != NULL && ipa3_ctx->nat_mem.is_tmp_mem_allocated) {
 		dma_free_coherent(ipa3_ctx->pdev, IPA_NAT_IPV6CT_TEMP_MEM_SIZE,
 			tmp_mem->vaddr, tmp_mem->dma_handle);
 		kfree(tmp_mem);
+		ipa3_ctx->nat_mem.is_tmp_mem_allocated = false;
 	}
 	return result;
 }
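The is_tmp_mem_allocated flag added in this hunk makes the temporary
NAT/IPv6CT buffer free-exactly-once: destroy paths only call
dma_free_coherent()/kfree() while this context still owns the allocation.
The guard pattern in isolation (all names illustrative):

	if (buf && owns_buf) {
		dma_free_coherent(dev, size, buf->vaddr, buf->dma_handle);
		kfree(buf);
		owns_buf = false;
	}
	buf = NULL;	/* drop the stale pointer in either case */
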
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc.c
index 8a01254..8eeebd9 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_uc.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
  */
 
 #include "ipa_i.h"
@@ -518,6 +518,21 @@ static void ipa3_uc_response_hdlr(enum ipa_irq_type interrupt,
 	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
 }
 
+static void ipa3_uc_wigig_misc_int_handler(enum ipa_irq_type interrupt,
+	void *private_data,
+	void *interrupt_data)
+{
+	IPADBG("\n");
+
+	WARN_ON(private_data != ipa3_ctx);
+
+	if (ipa3_ctx->uc_wigig_ctx.misc_notify_cb)
+		ipa3_ctx->uc_wigig_ctx.misc_notify_cb(
+			ipa3_ctx->uc_wigig_ctx.priv);
+
+	IPADBG("exit\n");
+}
+
 static int ipa3_uc_send_cmd_64b_param(u32 cmd_lo, u32 cmd_hi, u32 opcode,
 	u32 expected_status, bool polling_mode, unsigned long timeout_jiffies)
 {
@@ -681,7 +696,7 @@ int ipa3_uc_interface_init(void)
 		ipa3_uc_event_handler, true,
 		ipa3_ctx);
 	if (result) {
-		IPAERR("Fail to register for UC_IRQ0 rsp interrupt\n");
+		IPAERR("Fail to register for UC_IRQ0 event interrupt\n");
 		result = -EFAULT;
 		goto irq_fail0;
 	}
@@ -695,11 +710,21 @@ int ipa3_uc_interface_init(void)
 		goto irq_fail1;
 	}
 
+	result = ipa3_add_interrupt_handler(IPA_UC_IRQ_2,
+		ipa3_uc_wigig_misc_int_handler, true,
+		ipa3_ctx);
+	if (result) {
+		IPAERR("fail to register for UC_IRQ2 wigig misc interrupt\n");
+		result = -EFAULT;
+		goto irq_fail2;
+	}
+
 	ipa3_ctx->uc_ctx.uc_inited = true;
 
 	IPADBG("IPA uC interface is initialized\n");
 	return 0;
-
+irq_fail2:
+	ipa3_remove_interrupt_handler(IPA_UC_IRQ_1);
 irq_fail1:
 	ipa3_remove_interrupt_handler(IPA_UC_IRQ_0);
 irq_fail0:
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
index 601e8325a..c4cc0c265 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
@@ -6384,6 +6384,13 @@ int ipa3_bind_api_controller(enum ipa_hw_type ipa_hw_type,
 	api_ctrl->ipa_get_smmu_params = ipa3_get_smmu_params;
 	api_ctrl->ipa_is_vlan_mode = ipa3_is_vlan_mode;
 	api_ctrl->ipa_pm_is_used = ipa3_pm_is_used;
+	api_ctrl->ipa_wigig_uc_init = ipa3_wigig_uc_init;
+	api_ctrl->ipa_conn_wigig_rx_pipe_i = ipa3_conn_wigig_rx_pipe_i;
+	api_ctrl->ipa_conn_wigig_client_i = ipa3_conn_wigig_client_i;
+	api_ctrl->ipa_disconn_wigig_pipe_i = ipa3_disconn_wigig_pipe_i;
+	api_ctrl->ipa_wigig_uc_msi_init = ipa3_wigig_uc_msi_init;
+	api_ctrl->ipa_enable_wigig_pipe_i = ipa3_enable_wigig_pipe_i;
+	api_ctrl->ipa_disable_wigig_pipe_i = ipa3_disable_wigig_pipe_i;
 
 	return 0;
 }
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_wigig_i.c b/drivers/platform/msm/ipa/ipa_v3/ipa_wigig_i.c
new file mode 100644
index 0000000..7e6ac9a
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_wigig_i.c
@@ -0,0 +1,1500 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include "ipa_i.h"
+#include <linux/if_ether.h>
+#include <linux/log2.h>
+#include <linux/ipa_wigig.h>
+
+#define IPA_WIGIG_DESC_RING_EL_SIZE	32
+
+#define GSI_STOP_MAX_RETRY_CNT 10
+
+#define IPA_WIGIG_CONNECTED BIT(0)
+#define IPA_WIGIG_ENABLED BIT(1)
+#define IPA_WIGIG_MSB_MASK 0xFFFFFFFF00000000
+#define IPA_WIGIG_LSB_MASK 0x00000000FFFFFFFF
+#define IPA_WIGIG_MSB(num) ((u32)(((num) & IPA_WIGIG_MSB_MASK) >> 32))
+#define IPA_WIGIG_LSB(num) ((u32)((num) & IPA_WIGIG_LSB_MASK))
+#define W11AD_RX 0
+#define W11AD_TX 1
+#define W11AD_TO_GSI_DB_m 1
+#define W11AD_TO_GSI_DB_n 1
+
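A worked example of the MSB/LSB split defined above, as used later in
ipa3_wigig_config_gsi() to program a 64-bit physical address into two 32-bit
GSI scratch fields:

	u64 pa = 0x0000001234567890ULL;
	u32 msb = IPA_WIGIG_MSB(pa);	/* 0x00000012 */
	u32 lsb = IPA_WIGIG_LSB(pa);	/* 0x34567890 */
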
+
+static int ipa3_wigig_uc_loaded_handler(struct notifier_block *self,
+	unsigned long val, void *data)
+{
+	IPADBG("val %d\n", val);
+
+	if (!ipa3_ctx) {
+		IPAERR("IPA ctx is null\n");
+		return -EINVAL;
+	}
+
+	WARN_ON(data != ipa3_ctx);
+
+	if (ipa3_ctx->uc_wigig_ctx.uc_ready_cb) {
+		ipa3_ctx->uc_wigig_ctx.uc_ready_cb(
+			ipa3_ctx->uc_wigig_ctx.priv);
+
+		ipa3_ctx->uc_wigig_ctx.uc_ready_cb =
+			NULL;
+		ipa3_ctx->uc_wigig_ctx.priv = NULL;
+	}
+
+	IPADBG("exit\n");
+	return 0;
+}
+
+static struct notifier_block uc_loaded_notifier = {
+	.notifier_call = ipa3_wigig_uc_loaded_handler,
+};
+
+int ipa3_wigig_init_i(void)
+{
+	IPADBG("\n");
+
+	ipa3_uc_register_ready_cb(&uc_loaded_notifier);
+
+	IPADBG("exit\n");
+
+	return 0;
+}
+
+int ipa3_wigig_uc_init(
+	struct ipa_wdi_uc_ready_params *inout,
+	ipa_wigig_misc_int_cb int_notify,
+	phys_addr_t *uc_db_pa)
+{
+	int result = 0;
+
+	IPADBG("\n");
+
+	if (inout == NULL) {
+		IPAERR("inout is NULL");
+		return -EINVAL;
+	}
+
+	if (int_notify == NULL) {
+		IPAERR("int_notify is NULL");
+		return -EINVAL;
+	}
+
+	result = ipa3_uc_state_check();
+	if (result) {
+		inout->is_uC_ready = false;
+		ipa3_ctx->uc_wigig_ctx.uc_ready_cb = inout->notify;
+		ipa3_ctx->uc_wigig_ctx.priv = inout->priv;
+	} else {
+		inout->is_uC_ready = true;
+	}
+
+	ipa3_ctx->uc_wigig_ctx.misc_notify_cb = int_notify;
+
+	*uc_db_pa = ipa3_ctx->ipa_wrapper_base +
+		ipahal_get_reg_base() +
+		ipahal_get_reg_mn_ofst(
+			IPA_UC_MAILBOX_m_n,
+			W11AD_TO_GSI_DB_m,
+			W11AD_TO_GSI_DB_n);
+
+	IPADBG("exit\n");
+
+	return 0;
+}
+
+static int ipa3_wigig_tx_bit_to_ep(
+	const u8 tx_bit_num,
+	enum ipa_client_type *type)
+{
+	IPADBG("tx_bit_num %d\n", tx_bit_num);
+
+	switch (tx_bit_num) {
+	case 2:
+		*type = IPA_CLIENT_WIGIG1_CONS;
+		break;
+	case 3:
+		*type = IPA_CLIENT_WIGIG2_CONS;
+		break;
+	case 4:
+		*type = IPA_CLIENT_WIGIG3_CONS;
+		break;
+	case 5:
+		*type = IPA_CLIENT_WIGIG4_CONS;
+		break;
+	default:
+		IPAERR("invalid tx_bit_num %d\n", tx_bit_num);
+		return -EINVAL;
+	}
+
+	IPADBG("exit\n");
+	return 0;
+}
+
+static int ipa3_wigig_smmu_map_channel(bool Rx,
+	struct ipa_wigig_pipe_setup_info_smmu *pipe_smmu,
+	void *buff,
+	bool map)
+{
+	int result = 0;
+
+	IPADBG("\n");
+
+	/*
+	 * --------------------------------------------------------------------
+	 *  entity         |HWHEAD|HWTAIL|HWHEAD|HWTAIL| misc | buffers| rings|
+	 *                 |Sring |Sring |Dring |Dring | regs |        |      |
+	 * --------------------------------------------------------------------
+	 *  GSI (apps CB)  |  TX  |RX, TX|      |RX, TX|      |        |Rx, TX|
+	 * --------------------------------------------------------------------
+	 *  IPA (WLAN CB)  |      |      |      |      |      | RX, TX |      |
+	 * --------------------------------------------------------------------
+	 *  uc (uC CB)     |  RX  |      |  TX  |      |always|        |      |
+	 * --------------------------------------------------------------------
+	 */
+
+	if (Rx) {
+		result = ipa3_smmu_map_peer_reg(
+			pipe_smmu->status_ring_HWHEAD_pa,
+			map,
+			IPA_SMMU_CB_UC);
+		if (result) {
+			IPAERR(
+				"failed to %s status_ring_HWAHEAD %d\n",
+				map ? "map" : "unmap",
+				result);
+			goto fail_status_HWHEAD;
+		}
+	} else {
+
+		result = ipa3_smmu_map_peer_reg(
+			pipe_smmu->status_ring_HWHEAD_pa,
+			map,
+			IPA_SMMU_CB_AP);
+		if (result) {
+			IPAERR(
+				"failed to %s status_ring_HWAHEAD %d\n",
+				map ? "map" : "unmap",
+				result);
+			goto fail_status_HWHEAD;
+		}
+
+		result = ipa3_smmu_map_peer_reg(
+			pipe_smmu->desc_ring_HWHEAD_pa,
+			map,
+			IPA_SMMU_CB_UC);
+		if (result) {
+			IPAERR("failed to %s desc_ring_HWHEAD %d\n",
+				map ? "map" : "unmap",
+				result);
+			goto fail;
+		}
+	}
+
+	result = ipa3_smmu_map_peer_reg(
+		pipe_smmu->status_ring_HWTAIL_pa,
+		map,
+		IPA_SMMU_CB_AP);
+	if (result) {
+		IPAERR(
+			"failed to %s status_ring_HWTAIL %d\n",
+			map ? "map" : "unmap",
+			result);
+		goto fail_status_HWTAIL;
+	}
+
+	result = ipa3_smmu_map_peer_reg(
+		pipe_smmu->desc_ring_HWTAIL_pa,
+		map,
+		IPA_SMMU_CB_AP);
+	if (result) {
+		IPAERR("failed to %s desc_ring_HWTAIL %d\n",
+			map ? "map" : "unmap",
+			result);
+		goto fail_desc_HWTAIL;
+	}
+
+	result = ipa3_smmu_map_peer_buff(
+		pipe_smmu->desc_ring_base_iova,
+		pipe_smmu->desc_ring_size,
+		map,
+		&pipe_smmu->desc_ring_base,
+		IPA_SMMU_CB_AP);
+	if (result) {
+		IPAERR("failed to %s desc_ring_base %d\n",
+			map ? "map" : "unmap",
+			result);
+		goto fail_desc_ring;
+	}
+
+	result = ipa3_smmu_map_peer_buff(
+		pipe_smmu->status_ring_base_iova,
+		pipe_smmu->status_ring_size,
+		map,
+		&pipe_smmu->status_ring_base,
+		IPA_SMMU_CB_AP);
+	if (result) {
+		IPAERR("failed to %s status_ring_base %d\n",
+			map ? "map" : "unmap",
+			result);
+		goto fail_status_ring;
+	}
+
+	if (Rx) {
+		struct ipa_wigig_rx_pipe_data_buffer_info_smmu *dbuff_smmu =
+			(struct ipa_wigig_rx_pipe_data_buffer_info_smmu *)buff;
+
+		int num_elem =
+			pipe_smmu->desc_ring_size /
+			IPA_WIGIG_DESC_RING_EL_SIZE;
+
+		result = ipa3_smmu_map_peer_buff(
+			dbuff_smmu->data_buffer_base_iova,
+			dbuff_smmu->data_buffer_size * num_elem,
+			map,
+			&dbuff_smmu->data_buffer_base,
+			IPA_SMMU_CB_WLAN);
+		if (result) {
+			IPAERR(
+				"failed to %s rx data_buffer %d, num elem %d\n"
+				, map ? "map" : "unmap",
+				result, num_elem);
+			goto fail_map_buff;
+		}
+
+	} else {
+		int i;
+		struct ipa_wigig_tx_pipe_data_buffer_info_smmu *dbuff_smmu =
+			(struct ipa_wigig_tx_pipe_data_buffer_info_smmu *)buff;
+
+		for (i = 0; i < dbuff_smmu->num_buffers; i++) {
+			result = ipa3_smmu_map_peer_buff(
+				*(dbuff_smmu->data_buffer_base_iova + i),
+				dbuff_smmu->data_buffer_size,
+				map,
+				(dbuff_smmu->data_buffer_base + i),
+				IPA_SMMU_CB_WLAN);
+			if (result) {
+				IPAERR(
+					"%d: failed to %s tx data buffer %d\n"
+					, i, map ? "map" : "unmap",
+					result);
+				/* best-effort rollback of the buffers
+				 * already handled; keep the original
+				 * error code in result
+				 */
+				for (i--; i >= 0; i--) {
+					ipa3_smmu_map_peer_buff(
+					*(dbuff_smmu->data_buffer_base_iova +
+						i),
+					dbuff_smmu->data_buffer_size,
+					!map,
+					(dbuff_smmu->data_buffer_base +
+						i),
+					IPA_SMMU_CB_WLAN);
+				}
+				goto fail_map_buff;
+			}
+		}
+	}
+
+	IPADBG("exit\n");
+
+	return 0;
+fail_map_buff:
+	ipa3_smmu_map_peer_buff(
+		pipe_smmu->status_ring_base_iova, pipe_smmu->status_ring_size,
+		!map, &pipe_smmu->status_ring_base,
+		IPA_SMMU_CB_AP);
+fail_status_ring:
+	ipa3_smmu_map_peer_buff(
+		pipe_smmu->desc_ring_base_iova, pipe_smmu->desc_ring_size,
+		!map, &pipe_smmu->desc_ring_base,
+		IPA_SMMU_CB_AP);
+fail_desc_ring:
+	ipa3_smmu_map_peer_reg(
+		pipe_smmu->status_ring_HWTAIL_pa, !map, IPA_SMMU_CB_AP);
+fail_status_HWTAIL:
+	if (Rx)
+		ipa3_smmu_map_peer_reg(pipe_smmu->status_ring_HWHEAD_pa,
+			!map, IPA_SMMU_CB_UC);
+fail_status_HWHEAD:
+	ipa3_smmu_map_peer_reg(
+		pipe_smmu->desc_ring_HWTAIL_pa, !map, IPA_SMMU_CB_AP);
+fail_desc_HWTAIL:
+	ipa3_smmu_map_peer_reg(
+		pipe_smmu->desc_ring_HWHEAD_pa, !map, IPA_SMMU_CB_UC);
+fail:
+	return result;
+}
+
+static void ipa_gsi_chan_err_cb(struct gsi_chan_err_notify *notify)
+{
+	switch (notify->evt_id) {
+	case GSI_CHAN_INVALID_TRE_ERR:
+		IPAERR("Got GSI_CHAN_INVALID_TRE_ERR\n");
+		break;
+	case GSI_CHAN_NON_ALLOCATED_EVT_ACCESS_ERR:
+		IPAERR("Got GSI_CHAN_NON_ALLOCATED_EVT_ACCESS_ERR\n");
+		break;
+	case GSI_CHAN_OUT_OF_BUFFERS_ERR:
+		IPAERR("Got GSI_CHAN_OUT_OF_BUFFERS_ERR\n");
+		break;
+	case GSI_CHAN_OUT_OF_RESOURCES_ERR:
+		IPAERR("Got GSI_CHAN_OUT_OF_RESOURCES_ERR\n");
+		break;
+	case GSI_CHAN_UNSUPPORTED_INTER_EE_OP_ERR:
+		IPAERR("Got GSI_CHAN_UNSUPPORTED_INTER_EE_OP_ERR\n");
+		break;
+	case GSI_CHAN_HWO_1_ERR:
+		IPAERR("Got GSI_CHAN_HWO_1_ERR\n");
+		break;
+	default:
+		IPAERR("Unexpected err evt: %d\n", notify->evt_id);
+	}
+	ipa_assert();
+}
+
+static void ipa_gsi_evt_ring_err_cb(struct gsi_evt_err_notify *notify)
+{
+	switch (notify->evt_id) {
+	case GSI_EVT_OUT_OF_BUFFERS_ERR:
+		IPAERR("Got GSI_EVT_OUT_OF_BUFFERS_ERR\n");
+		break;
+	case GSI_EVT_OUT_OF_RESOURCES_ERR:
+		IPAERR("Got GSI_EVT_OUT_OF_RESOURCES_ERR\n");
+		break;
+	case GSI_EVT_UNSUPPORTED_INTER_EE_OP_ERR:
+		IPAERR("Got GSI_EVT_UNSUPPORTED_INTER_EE_OP_ERR\n");
+		break;
+	case GSI_EVT_EVT_RING_EMPTY_ERR:
+		IPAERR("Got GSI_EVT_EVT_RING_EMPTY_ERR\n");
+		break;
+	default:
+		IPAERR("Unexpected err evt: %d\n", notify->evt_id);
+	}
+	ipa_assert();
+}
+
+static int ipa3_wigig_config_gsi(bool Rx,
+	bool smmu_en,
+	void *pipe_info,
+	void *buff,
+	const struct ipa_gsi_ep_config *ep_gsi,
+	struct ipa3_ep_context *ep)
+{
+	struct gsi_evt_ring_props evt_props;
+	struct gsi_chan_props channel_props;
+	union __packed gsi_channel_scratch gsi_scratch;
+	int gsi_res;
+	struct ipa_wigig_pipe_setup_info_smmu *pipe_smmu;
+	struct ipa_wigig_pipe_setup_info *pipe;
+	struct ipa_wigig_rx_pipe_data_buffer_info *rx_dbuff;
+	struct ipa_wigig_rx_pipe_data_buffer_info_smmu *rx_dbuff_smmu;
+	struct ipa_wigig_tx_pipe_data_buffer_info *tx_dbuff;
+	struct ipa_wigig_tx_pipe_data_buffer_info_smmu *tx_dbuff_smmu;
+
+	/* alloc event ring */
+	memset(&evt_props, 0, sizeof(evt_props));
+	evt_props.intf = GSI_EVT_CHTYPE_11AD_EV;
+	evt_props.re_size = GSI_EVT_RING_RE_SIZE_16B;
+	evt_props.intr = GSI_INTR_MSI;
+	evt_props.intvec = 0;
+	evt_props.exclusive = true;
+	evt_props.err_cb = ipa_gsi_evt_ring_err_cb;
+	evt_props.user_data = NULL;
+	evt_props.int_modc = 1;
+	evt_props.int_modt = 1;
+	evt_props.ring_base_vaddr = NULL;
+
+	if (smmu_en) {
+		pipe_smmu = (struct ipa_wigig_pipe_setup_info_smmu *)pipe_info;
+		evt_props.ring_base_addr =
+			pipe_smmu->desc_ring_base_iova;
+		evt_props.ring_len = pipe_smmu->desc_ring_size;
+		evt_props.msi_addr = pipe_smmu->desc_ring_HWTAIL_pa;
+	} else {
+		pipe = (struct ipa_wigig_pipe_setup_info *)pipe_info;
+		evt_props.ring_base_addr = pipe->desc_ring_base_pa;
+		evt_props.ring_len = pipe->desc_ring_size;
+		evt_props.msi_addr = pipe->desc_ring_HWTAIL_pa;
+	}
+
+	gsi_res = gsi_alloc_evt_ring(&evt_props,
+		ipa3_ctx->gsi_dev_hdl,
+		&ep->gsi_evt_ring_hdl);
+	if (gsi_res != GSI_STATUS_SUCCESS) {
+		IPAERR("Error allocating event ring: %d\n", gsi_res);
+		return -EFAULT;
+	}
+
+	/* event scratch not configured by SW for TX channels */
+	if (Rx) {
+		union __packed gsi_evt_scratch evt_scratch;
+
+		memset(&evt_scratch, 0, sizeof(evt_scratch));
+		evt_scratch.w11ad.update_status_hwtail_mod_threshold = 1;
+		gsi_res = gsi_write_evt_ring_scratch(ep->gsi_evt_ring_hdl,
+			evt_scratch);
+		if (gsi_res != GSI_STATUS_SUCCESS) {
+			IPAERR("Error writing WIGIG event ring scratch: %d\n",
+				gsi_res);
+			goto fail_write_evt_scratch;
+		}
+	}
+
+	ep->gsi_mem_info.evt_ring_len = evt_props.ring_len;
+	ep->gsi_mem_info.evt_ring_base_addr = evt_props.ring_base_addr;
+	ep->gsi_mem_info.evt_ring_base_vaddr = evt_props.ring_base_vaddr;
+
+	/* alloc channel ring */
+	memset(&channel_props, 0, sizeof(channel_props));
+	memset(&gsi_scratch, 0, sizeof(gsi_scratch));
+
+	if (Rx)
+		channel_props.dir = GSI_CHAN_DIR_TO_GSI;
+	else
+		channel_props.dir = GSI_CHAN_DIR_FROM_GSI;
+
+	channel_props.re_size = GSI_CHAN_RE_SIZE_16B;
+	channel_props.prot = GSI_CHAN_PROT_11AD;
+	channel_props.ch_id = ep_gsi->ipa_gsi_chan_num;
+	channel_props.evt_ring_hdl = ep->gsi_evt_ring_hdl;
+	channel_props.xfer_cb = NULL;
+
+	channel_props.use_db_eng = GSI_CHAN_DB_MODE;
+	channel_props.max_prefetch = GSI_ONE_PREFETCH_SEG;
+	channel_props.prefetch_mode = ep_gsi->prefetch_mode;
+	channel_props.low_weight = 1;
+	channel_props.err_cb = ipa_gsi_chan_err_cb;
+
+	channel_props.ring_base_vaddr = NULL;
+
+	if (Rx) {
+		if (smmu_en) {
+			rx_dbuff_smmu =
+			(struct ipa_wigig_rx_pipe_data_buffer_info_smmu *)buff;
+
+			channel_props.ring_base_addr =
+				pipe_smmu->status_ring_base_iova;
+			channel_props.ring_len =
+				pipe_smmu->status_ring_size;
+
+			gsi_scratch.rx_11ad.status_ring_hwtail_address_lsb =
+				IPA_WIGIG_LSB(
+					pipe_smmu->status_ring_HWTAIL_pa);
+			gsi_scratch.rx_11ad.status_ring_hwtail_address_msb =
+				IPA_WIGIG_MSB(
+					pipe_smmu->status_ring_HWTAIL_pa);
+
+			gsi_scratch.rx_11ad.data_buffers_base_address_lsb =
+				IPA_WIGIG_LSB(
+					rx_dbuff_smmu->data_buffer_base_iova);
+			gsi_scratch.rx_11ad.data_buffers_base_address_msb =
+				IPA_WIGIG_MSB(
+					rx_dbuff_smmu->data_buffer_base_iova);
+			gsi_scratch.rx_11ad.fixed_data_buffer_size_pow_2 =
+				ilog2(rx_dbuff_smmu->data_buffer_size);
+		} else {
+			rx_dbuff =
+			(struct ipa_wigig_rx_pipe_data_buffer_info *)buff;
+
+			channel_props.ring_base_addr =
+				pipe->status_ring_base_pa;
+			channel_props.ring_len = pipe->status_ring_size;
+
+			gsi_scratch.rx_11ad.status_ring_hwtail_address_lsb =
+				IPA_WIGIG_LSB(pipe->status_ring_HWTAIL_pa);
+			gsi_scratch.rx_11ad.status_ring_hwtail_address_msb =
+				IPA_WIGIG_MSB(pipe->status_ring_HWTAIL_pa);
+
+			gsi_scratch.rx_11ad.data_buffers_base_address_lsb =
+				IPA_WIGIG_LSB(rx_dbuff->data_buffer_base_pa);
+			gsi_scratch.rx_11ad.data_buffers_base_address_msb =
+				IPA_WIGIG_MSB(rx_dbuff->data_buffer_base_pa);
+			gsi_scratch.rx_11ad.fixed_data_buffer_size_pow_2 =
+				ilog2(rx_dbuff->data_buffer_size);
+		}
+		IPADBG("fixed_data_buffer_size_pow_2 %d\n",
+			gsi_scratch.rx_11ad.fixed_data_buffer_size_pow_2);
+	} else {
+		if (smmu_en) {
+			tx_dbuff_smmu =
+			(struct ipa_wigig_tx_pipe_data_buffer_info_smmu *)buff;
+			channel_props.ring_base_addr =
+				pipe_smmu->desc_ring_base_iova;
+			channel_props.ring_len =
+				pipe_smmu->desc_ring_size;
+
+			gsi_scratch.tx_11ad.status_ring_hwtail_address_lsb =
+				IPA_WIGIG_LSB(
+					pipe_smmu->status_ring_HWTAIL_pa);
+			gsi_scratch.tx_11ad.status_ring_hwtail_address_msb =
+				IPA_WIGIG_MSB(
+					pipe_smmu->status_ring_HWTAIL_pa);
+
+			gsi_scratch.tx_11ad.fixed_data_buffer_size_pow_2 =
+				ilog2(tx_dbuff_smmu->data_buffer_size);
+		} else {
+			tx_dbuff =
+			(struct ipa_wigig_tx_pipe_data_buffer_info *)buff;
+
+			channel_props.ring_base_addr = pipe->desc_ring_base_pa;
+			channel_props.ring_len = pipe->desc_ring_size;
+
+			gsi_scratch.tx_11ad.status_ring_hwtail_address_lsb =
+				IPA_WIGIG_LSB(
+					pipe->status_ring_HWTAIL_pa);
+			gsi_scratch.tx_11ad.status_ring_hwtail_address_msb =
+				IPA_WIGIG_MSB(
+					pipe->status_ring_HWTAIL_pa);
+
+			gsi_scratch.tx_11ad.fixed_data_buffer_size_pow_2 =
+				ilog2(tx_dbuff->data_buffer_size);
+		}
+		IPADBG("fixed_data_buffer_size_pow_2 %d\n",
+			gsi_scratch.tx_11ad.fixed_data_buffer_size_pow_2);
+	}
+
+	IPADBG("ch_id: %d\n", channel_props.ch_id);
+	IPADBG("evt_ring_hdl: %ld\n", channel_props.evt_ring_hdl);
+	IPADBG("re_size: %d\n", channel_props.re_size);
+	IPADBG("GSI channel ring len: %d\n", channel_props.ring_len);
+	IPADBG("channel ring  base addr = 0x%llX\n",
+		(unsigned long long)channel_props.ring_base_addr);
+
+	IPADBG("Allocating GSI channel\n");
+	gsi_res = gsi_alloc_channel(&channel_props,
+		ipa3_ctx->gsi_dev_hdl,
+		&ep->gsi_chan_hdl);
+	if (gsi_res != GSI_STATUS_SUCCESS)
+		goto fail_alloc_channel;
+
+	ep->gsi_mem_info.chan_ring_len = channel_props.ring_len;
+	ep->gsi_mem_info.chan_ring_base_addr = channel_props.ring_base_addr;
+	ep->gsi_mem_info.chan_ring_base_vaddr =
+		channel_props.ring_base_vaddr;
+
+	gsi_res = gsi_write_channel_scratch(ep->gsi_chan_hdl,
+		gsi_scratch);
+	if (gsi_res != GSI_STATUS_SUCCESS) {
+		IPAERR("gsi_write_channel_scratch failed %d\n",
+			gsi_res);
+		goto fail_write_channel_scratch;
+	}
+
+	return 0;
+
+fail_write_channel_scratch:
+	gsi_dealloc_channel(ep->gsi_chan_hdl);
+fail_alloc_channel:
+fail_write_evt_scratch:
+	gsi_dealloc_evt_ring(ep->gsi_evt_ring_hdl);
+	return -EFAULT;
+}
+
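+/*
+ * ipa3_wigig_config_uc() - send an offload channel setup or teardown
+ * command to the IPA uC through a DMA-coherent buffer. Setup carries the
+ * direction, wifi/GSI channel numbers and the peripheral head-pointer
+ * register address; teardown only needs the GSI channel number.
+ */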
+static int ipa3_wigig_config_uc(bool init,
+	bool Rx,
+	u8 wifi_ch,
+	u8 gsi_ch,
+	phys_addr_t HWHEAD)
+{
+	struct ipa_mem_buffer cmd;
+	enum ipa_cpu_2_hw_offload_commands command;
+	int result;
+
+	IPADBG("%s\n", init ? "init" : "Deinit");
+	if (init) {
+		struct IpaHwOffloadSetUpCmdData_t_v4_0 *cmd_data;
+
+		cmd.size = sizeof(*cmd_data);
+		cmd.base = dma_alloc_coherent(ipa3_ctx->uc_pdev, cmd.size,
+			&cmd.phys_base, GFP_KERNEL);
+		if (cmd.base == NULL) {
+			IPAERR("fail to get DMA memory.\n");
+			return -ENOMEM;
+		}
+
+		cmd_data =
+			(struct IpaHwOffloadSetUpCmdData_t_v4_0 *)cmd.base;
+
+		cmd_data->protocol = IPA_HW_PROTOCOL_11ad;
+		cmd_data->SetupCh_params.W11AdSetupCh_params.dir =
+			Rx ? W11AD_RX : W11AD_TX;
+		cmd_data->SetupCh_params.W11AdSetupCh_params.gsi_ch = gsi_ch;
+		cmd_data->SetupCh_params.W11AdSetupCh_params.wifi_ch = wifi_ch;
+		cmd_data->SetupCh_params.W11AdSetupCh_params.wifi_hp_addr_msb =
+			IPA_WIGIG_MSB(HWHEAD);
+		cmd_data->SetupCh_params.W11AdSetupCh_params.wifi_hp_addr_lsb =
+			IPA_WIGIG_LSB(HWHEAD);
+		command = IPA_CPU_2_HW_CMD_OFFLOAD_CHANNEL_SET_UP;
+
+	} else {
+		struct IpaHwOffloadCommonChCmdData_t_v4_0 *cmd_data;
+
+		cmd.size = sizeof(*cmd_data);
+		cmd.base = dma_alloc_coherent(ipa3_ctx->uc_pdev, cmd.size,
+			&cmd.phys_base, GFP_KERNEL);
+		if (cmd.base == NULL) {
+			IPAERR("fail to get DMA memory.\n");
+			return -ENOMEM;
+		}
+
+		cmd_data =
+			(struct IpaHwOffloadCommonChCmdData_t_v4_0 *)cmd.base;
+
+		cmd_data->protocol = IPA_HW_PROTOCOL_11ad;
+		cmd_data->CommonCh_params.W11AdCommonCh_params.gsi_ch = gsi_ch;
+		command = IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN;
+	}
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	result = ipa3_uc_send_cmd((u32)(cmd.phys_base),
+		command,
+		IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS,
+		false, 10 * HZ);
+	if (result) {
+		IPAERR("fail to %s uc for %s gsi channel %d\n",
+			init ? "init" : "deinit",
+			Rx ? "Rx" : "Tx", gsi_ch);
+	}
+
+	dma_free_coherent(ipa3_ctx->uc_pdev,
+		cmd.size, cmd.base, cmd.phys_base);
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+	IPADBG("exit\n");
+	return result;
+}
+
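+/*
+ * Connect the WIGIG Rx (producer) pipe: validate that the data-buffer
+ * address MSB and size fit the 8/16-bit scratch fields, configure the EP,
+ * SMMU-map the rings when S1 translation is active, then bring up the uC
+ * channel followed by the GSI rings.
+ */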
+int ipa3_conn_wigig_rx_pipe_i(void *in, struct ipa_wigig_conn_out_params *out)
+{
+	int ipa_ep_idx;
+	struct ipa3_ep_context *ep;
+	struct ipa_ep_cfg ep_cfg;
+	enum ipa_client_type rx_client = IPA_CLIENT_WIGIG_PROD;
+	bool is_smmu_enabled;
+	struct ipa_wigig_conn_rx_in_params_smmu *input_smmu = NULL;
+	struct ipa_wigig_conn_rx_in_params *input = NULL;
+	const struct ipa_gsi_ep_config *ep_gsi;
+	void *pipe_info;
+	void *buff;
+	phys_addr_t status_ring_HWHEAD_pa;
+	int result;
+
+	IPADBG("\n");
+
+	ipa_ep_idx = ipa_get_ep_mapping(rx_client);
+	if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED ||
+		ipa_ep_idx >= IPA3_MAX_NUM_PIPES) {
+		IPAERR("fail to get ep (IPA_CLIENT_WIGIG_PROD) %d.\n",
+			ipa_ep_idx);
+		return -EFAULT;
+	}
+
+	ep = &ipa3_ctx->ep[ipa_ep_idx];
+	if (ep->valid) {
+		IPAERR("EP %d already allocated.\n", ipa_ep_idx);
+		return -EFAULT;
+	}
+
+	if (ep->gsi_offload_state) {
+		IPAERR("WIGIG channel bad state 0x%X\n",
+			ep->gsi_offload_state);
+		return -EFAULT;
+	}
+
+	ep_gsi = ipa3_get_gsi_ep_info(rx_client);
+	if (!ep_gsi) {
+		IPAERR("Failed getting GSI EP info for client=%d\n",
+			rx_client);
+		return -EPERM;
+	}
+
+	memset(ep, 0, offsetof(struct ipa3_ep_context, sys));
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	/* setup rx ep cfg */
+	ep->valid = 1;
+	ep->client = rx_client;
+	result = ipa3_disable_data_path(ipa_ep_idx);
+	if (result) {
+		IPAERR("disable data path failed res=%d clnt=%d.\n", result,
+			ipa_ep_idx);
+		IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+		return -EFAULT;
+	}
+
+	is_smmu_enabled = !ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_WLAN];
+	if (is_smmu_enabled) {
+		struct ipa_wigig_rx_pipe_data_buffer_info_smmu *dbuff_smmu;
+
+		input_smmu = (struct ipa_wigig_conn_rx_in_params_smmu *)in;
+		dbuff_smmu = &input_smmu->dbuff_smmu;
+		ep->client_notify = input_smmu->notify;
+		ep->priv = input_smmu->priv;
+
+		if (IPA_WIGIG_MSB(
+			dbuff_smmu->data_buffer_base_iova) &
+			0xFFFFFF00) {
+			IPAERR(
+				"data_buffers_base_address_msb is over the 8 bit limit (0xpa)\n"
+				, &dbuff_smmu->data_buffer_base_iova);
+			IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+			return -EFAULT;
+		}
+		if (dbuff_smmu->data_buffer_size >> 16) {
+			IPAERR(
+				"data_buffer_size is over the 16 bit limit (0x%X)\n"
+				, dbuff_smmu->data_buffer_size);
+			IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+			return -EFAULT;
+		}
+	} else {
+		input = (struct ipa_wigig_conn_rx_in_params *)in;
+		ep->client_notify = input->notify;
+		ep->priv = input->priv;
+
+		if (
+		IPA_WIGIG_MSB(input->dbuff.data_buffer_base_pa) & 0xFFFFFF00) {
+			IPAERR(
+				"data_buffers_base_address_msb is over the 8 bit limit (0xpa)\n"
+				, &input->dbuff.data_buffer_base_pa);
+			IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+			return -EFAULT;
+		}
+		if (input->dbuff.data_buffer_size >> 16) {
+			IPAERR(
+				"data_buffer_size is over the 16 bit limit (0x%X)\n"
+				, input->dbuff.data_buffer_size);
+			IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+			return -EFAULT;
+		}
+	}
+
+	memset(&ep_cfg, 0, sizeof(ep_cfg));
+	ep_cfg.nat.nat_en = IPA_SRC_NAT;
+	ep_cfg.hdr.hdr_len = ETH_HLEN;
+	ep_cfg.hdr.hdr_ofst_pkt_size_valid = 0;
+	ep_cfg.hdr.hdr_ofst_pkt_size = 0;
+	ep_cfg.hdr.hdr_additional_const_len = 0;
+	ep_cfg.hdr_ext.hdr_little_endian = true;
+	ep_cfg.hdr.hdr_ofst_metadata_valid = 0;
+	ep_cfg.hdr.hdr_metadata_reg_valid = 1;
+	ep_cfg.mode.mode = IPA_BASIC;
+
+	if (ipa3_cfg_ep(ipa_ep_idx, &ep_cfg)) {
+		IPAERR("fail to setup rx pipe cfg\n");
+		result = -EFAULT;
+		goto fail;
+	}
+
+	if (is_smmu_enabled) {
+		result = ipa3_wigig_smmu_map_channel(true,
+			&input_smmu->pipe_smmu,
+			&input_smmu->dbuff_smmu,
+			true);
+		if (result) {
+			IPAERR("failed to setup rx pipe smmu map\n");
+			result = -EFAULT;
+			goto fail;
+		}
+
+		pipe_info = &input_smmu->pipe_smmu;
+		buff = &input_smmu->dbuff_smmu;
+		status_ring_HWHEAD_pa =
+			input_smmu->pipe_smmu.status_ring_HWHEAD_pa;
+	} else {
+		pipe_info = &input->pipe;
+		buff = &input->dbuff;
+		status_ring_HWHEAD_pa =
+			input->pipe.status_ring_HWHEAD_pa;
+	}
+
+	result = ipa3_wigig_config_uc(
+		true, true, 0,
+		ep_gsi->ipa_gsi_chan_num,
+		status_ring_HWHEAD_pa);
+	if (result)
+		goto fail_uc_config;
+
+	result = ipa3_wigig_config_gsi(true,
+		is_smmu_enabled,
+		pipe_info,
+		buff,
+		ep_gsi, ep);
+	if (result)
+		goto fail_gsi;
+
+	ipa3_install_dflt_flt_rules(ipa_ep_idx);
+
+	out->client = IPA_CLIENT_WIGIG_PROD;
+	ep->gsi_offload_state |= IPA_WIGIG_CONNECTED;
+
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+	IPADBG("wigig rx pipe connected successfully\n");
+	IPADBG("exit\n");
+
+	return 0;
+
+fail_gsi:
+	ipa3_wigig_config_uc(
+		false, true, 0,
+		ep_gsi->ipa_gsi_chan_num,
+		status_ring_HWHEAD_pa);
+fail_uc_config:
+	if (input_smmu)
+		ipa3_wigig_smmu_map_channel(true, &input_smmu->pipe_smmu,
+			&input_smmu->dbuff_smmu, false);
+fail:
+	ep->valid = 0;
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	return result;
+}
+
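+/*
+ * Connect a WIGIG Tx (consumer) pipe: the client is derived from the
+ * interrupt-generation bit number and aggregation is capped by the fixed
+ * data-buffer size, converted to kB for the hard byte limit.
+ */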
+int ipa3_conn_wigig_client_i(void *in, struct ipa_wigig_conn_out_params *out)
+{
+	int ipa_ep_idx;
+	struct ipa3_ep_context *ep;
+	struct ipa_ep_cfg ep_cfg;
+	enum ipa_client_type tx_client;
+	bool is_smmu_enabled;
+	struct ipa_wigig_conn_tx_in_params_smmu *input_smmu = NULL;
+	struct ipa_wigig_conn_tx_in_params *input = NULL;
+	const struct ipa_gsi_ep_config *ep_gsi;
+	u32 aggr_byte_limit;
+	int result;
+	void *pipe_info;
+	void *buff;
+	phys_addr_t desc_ring_HWHEAD_pa;
+	u8 wifi_ch;
+
+	IPADBG("\n");
+
+	is_smmu_enabled = !ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_WLAN];
+	if (is_smmu_enabled) {
+		input_smmu = (struct ipa_wigig_conn_tx_in_params_smmu *)in;
+		if (ipa3_wigig_tx_bit_to_ep(input_smmu->int_gen_tx_bit_num,
+			&tx_client)) {
+			return -EINVAL;
+		}
+		wifi_ch = input_smmu->int_gen_tx_bit_num;
+		if (input_smmu->dbuff_smmu.data_buffer_size >> 16) {
+			IPAERR(
+				"data_buffer_size is over the 16 bit limit (0x%X)\n"
+				, input_smmu->dbuff_smmu.data_buffer_size);
+			return -EFAULT;
+		}
+
+		/* convert to kBytes */
+		aggr_byte_limit = IPA_ADJUST_AGGR_BYTE_HARD_LIMIT(
+			input_smmu->dbuff_smmu.data_buffer_size);
+	} else {
+		input = (struct ipa_wigig_conn_tx_in_params *)in;
+		if (ipa3_wigig_tx_bit_to_ep(input->int_gen_tx_bit_num,
+			&tx_client)) {
+			return -EINVAL;
+		}
+		wifi_ch = input->int_gen_tx_bit_num;
+
+		if (input->dbuff.data_buffer_size >> 16) {
+			IPAERR(
+				"data_buffer_size is over the 16 bit limit (0x%X)\n"
+				, input->dbuff.data_buffer_size);
+			return -EFAULT;
+		}
+
+		/* convert to kBytes */
+		aggr_byte_limit = IPA_ADJUST_AGGR_BYTE_HARD_LIMIT(
+			input->dbuff.data_buffer_size);
+	}
+	IPADBG("client type is %d\n", tx_client);
+
+	ipa_ep_idx = ipa_get_ep_mapping(tx_client);
+	if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED ||
+		ipa_ep_idx >= IPA3_MAX_NUM_PIPES) {
+		IPAERR("fail to get ep (%d) %d.\n",
+			tx_client, ipa_ep_idx);
+		return -EFAULT;
+	}
+
+	ep = &ipa3_ctx->ep[ipa_ep_idx];
+	if (ep->valid) {
+		IPAERR("EP %d already allocated.\n", ipa_ep_idx);
+		return -EFAULT;
+	}
+
+	if (ep->gsi_offload_state) {
+		IPAERR("WIGIG channel bad state 0x%X\n",
+			ep->gsi_offload_state);
+		return -EFAULT;
+	}
+
+	ep_gsi = ipa3_get_gsi_ep_info(tx_client);
+	if (!ep_gsi) {
+		IPAERR("Failed getting GSI EP info for client=%d\n",
+			tx_client);
+		return -EFAULT;
+	}
+
+	memset(ep, 0, offsetof(struct ipa3_ep_context, sys));
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	/* setup tx ep cfg */
+	ep->valid = 1;
+	ep->client = tx_client;
+	result = ipa3_disable_data_path(ipa_ep_idx);
+	if (result) {
+		IPAERR("disable data path failed res=%d clnt=%d.\n", result,
+			ipa_ep_idx);
+		goto fail;
+	}
+
+	ep->client_notify = NULL;
+	ep->priv = NULL;
+
+	memset(&ep_cfg, 0, sizeof(ep_cfg));
+	ep_cfg.nat.nat_en = IPA_DST_NAT;
+	ep_cfg.hdr.hdr_len = ETH_HLEN;
+	ep_cfg.hdr.hdr_ofst_pkt_size_valid = 0;
+	ep_cfg.hdr.hdr_ofst_pkt_size = 0;
+	ep_cfg.hdr.hdr_additional_const_len = 0;
+	ep_cfg.hdr_ext.hdr_little_endian = true;
+	ep_cfg.mode.mode = IPA_BASIC;
+
+	/* config hard byte limit, max is the buffer size (in kB) */
+	ep_cfg.aggr.aggr_en = IPA_ENABLE_AGGR;
+	ep_cfg.aggr.aggr = IPA_GENERIC;
+	ep_cfg.aggr.aggr_pkt_limit = 1;
+	ep_cfg.aggr.aggr_byte_limit = aggr_byte_limit;
+	ep_cfg.aggr.aggr_hard_byte_limit_en = IPA_ENABLE_AGGR;
+
+	if (ipa3_cfg_ep(ipa_ep_idx, &ep_cfg)) {
+		IPAERR("fail to setup rx pipe cfg\n");
+		result = -EFAULT;
+		goto fail;
+	}
+
+	if (is_smmu_enabled) {
+		result = ipa3_wigig_smmu_map_channel(false,
+			&input_smmu->pipe_smmu,
+			&input_smmu->dbuff_smmu,
+			true);
+		if (result) {
+			IPAERR(
+				"failed to setup tx pipe smmu map client %d (ep %d)\n"
+			, tx_client, ipa_ep_idx);
+			result = -EFAULT;
+			goto fail;
+		}
+
+		pipe_info = &input_smmu->pipe_smmu;
+		buff = &input_smmu->dbuff_smmu;
+		desc_ring_HWHEAD_pa =
+			input_smmu->pipe_smmu.desc_ring_HWHEAD_pa;
+	} else {
+		pipe_info = &input->pipe;
+		buff = &input->dbuff;
+		desc_ring_HWHEAD_pa =
+			input->pipe.desc_ring_HWHEAD_pa;
+	}
+
+	result = ipa3_wigig_config_uc(
+		true, false, wifi_ch,
+		ep_gsi->ipa_gsi_chan_num,
+		desc_ring_HWHEAD_pa);
+	if (result)
+		goto fail_uc_config;
+
+	result = ipa3_wigig_config_gsi(false,
+		is_smmu_enabled,
+		pipe_info,
+		buff,
+		ep_gsi, ep);
+	if (result)
+		goto fail_gsi;
+
+	out->client = tx_client;
+	ep->gsi_offload_state |= IPA_WIGIG_CONNECTED;
+
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+	IPADBG("wigig client %d (ep %d) connected successfully\n", tx_client,
+		ipa_ep_idx);
+	return 0;
+
+fail_gsi:
+	ipa3_wigig_config_uc(
+		false, false, wifi_ch,
+		ep_gsi->ipa_gsi_chan_num,
+		desc_ring_HWHEAD_pa);
+fail_uc_config:
+	if (input_smmu)
+		ipa3_wigig_smmu_map_channel(false, &input_smmu->pipe_smmu,
+			&input_smmu->dbuff_smmu, false);
+fail:
+	ep->valid = 0;
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	return result;
+}
+
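+/*
+ * Disconnect a connected WIGIG pipe: release the GSI channel and event
+ * ring, undo the SMMU mappings when S1 translation is active, then tear
+ * down the uC channel (only the GSI channel number and direction matter).
+ */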
+int ipa3_disconn_wigig_pipe_i(enum ipa_client_type client,
+	struct ipa_wigig_pipe_setup_info_smmu *pipe_smmu,
+	void *dbuff)
+{
+	bool is_smmu_enabled;
+	int ipa_ep_idx;
+	struct ipa3_ep_context *ep;
+	const struct ipa_gsi_ep_config *ep_gsi;
+	int result;
+	bool rx = false;
+
+	IPADBG("\n");
+
+	ipa_ep_idx = ipa_get_ep_mapping(client);
+	if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED ||
+		ipa_ep_idx >= IPA3_MAX_NUM_PIPES) {
+		IPAERR("fail to get ep (%d) %d.\n",
+			client, ipa_ep_idx);
+		return -EFAULT;
+	}
+
+	ep = &ipa3_ctx->ep[ipa_ep_idx];
+	if (!ep->valid) {
+		IPAERR("Invalid EP\n");
+		return -EFAULT;
+	}
+
+	ep_gsi = ipa3_get_gsi_ep_info(client);
+	if (!ep_gsi) {
+		IPAERR("Failed getting GSI EP info for client=%d\n",
+			client);
+		return -EFAULT;
+	}
+
+	if (ep->gsi_offload_state != IPA_WIGIG_CONNECTED) {
+		IPAERR("client in bad state(client %d) 0x%X\n",
+			client, ep->gsi_offload_state);
+		return -EFAULT;
+	}
+
+	if (client == IPA_CLIENT_WIGIG_PROD)
+		rx = true;
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	/* Release channel and evt ring */
+	result = ipa3_release_gsi_channel(ipa_ep_idx);
+	if (result) {
+		IPAERR("failed to deallocate channel\n");
+		goto fail;
+	}
+
+	is_smmu_enabled = !ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_WLAN];
+	if (is_smmu_enabled) {
+		if (!pipe_smmu || !dbuff) {
+			IPAERR("smmu input is null %pK %pK\n",
+				pipe_smmu, dbuff);
+			WARN_ON(1);
+		} else {
+			result = ipa3_wigig_smmu_map_channel(rx,
+				pipe_smmu,
+				dbuff,
+				false);
+			if (result) {
+				IPAERR(
+					"failed to unmap pipe smmu %d (ep %d)\n"
+					, client, ipa_ep_idx);
+				result = -EFAULT;
+				goto fail;
+			}
+		}
+	} else if (pipe_smmu || dbuff) {
+		IPAERR("smmu input is not null %pK %pK\n",
+			pipe_smmu, dbuff);
+		WARN_ON(1);
+	}
+
+	/* only gsi ch number and dir are necessary */
+	result = ipa3_wigig_config_uc(
+		false, rx, 0,
+		ep_gsi->ipa_gsi_chan_num, 0);
+	if (result) {
+		IPAERR("failed uC channel teardown %d\n", result);
+		WARN_ON(1);
+	}
+
+	memset(ep, 0, sizeof(struct ipa3_ep_context));
+
+	ep->gsi_offload_state = 0;
+
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+	IPADBG("client (ep: %d) disconnected\n", ipa_ep_idx);
+
+	IPADBG("exit\n");
+	return 0;
+
+fail:
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	return result;
+}
+
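+/*
+ * ipa3_wigig_uc_msi_init() - map the 11ad MSI/interrupt registers into the
+ * uC SMMU context bank (on init; unmap on deinit) and send the peripheral
+ * base address to the uC via PERIPHERAL_INIT/DEINIT. The error path
+ * reverses whichever mappings were applied.
+ */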
+int ipa3_wigig_uc_msi_init(bool init,
+	phys_addr_t periph_baddr_pa,
+	phys_addr_t pseudo_cause_pa,
+	phys_addr_t int_gen_tx_pa,
+	phys_addr_t int_gen_rx_pa,
+	phys_addr_t dma_ep_misc_pa)
+{
+	int result;
+	struct ipa_mem_buffer cmd;
+	enum ipa_cpu_2_hw_offload_commands command;
+	bool map = false;
+
+	IPADBG("\n");
+
+	/* first make sure registers are SMMU mapped if necessary */
+	if (!ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_UC]) {
+		if (init)
+			map = true;
+
+		IPADBG("SMMU enabled, map %d\n", map);
+
+		result = ipa3_smmu_map_peer_reg(
+			pseudo_cause_pa,
+			map,
+			IPA_SMMU_CB_UC);
+		if (result) {
+			IPAERR(
+				"failed to %s pseudo_cause reg %d\n",
+				map ? "map" : "unmap",
+				result);
+			goto fail;
+		}
+
+		result = ipa3_smmu_map_peer_reg(
+			int_gen_tx_pa,
+			map,
+			IPA_SMMU_CB_UC);
+		if (result) {
+			IPAERR(
+				"failed to %s int_gen_tx reg %d\n",
+				map ? "map" : "unmap",
+				result);
+			goto fail_gen_tx;
+		}
+
+		result = ipa3_smmu_map_peer_reg(
+			int_gen_rx_pa,
+			map,
+			IPA_SMMU_CB_UC);
+		if (result) {
+			IPAERR(
+				"failed to %s int_gen_rx reg %d\n",
+				map ? "map" : "unmap",
+				result);
+			goto fail_gen_rx;
+		}
+
+		result = ipa3_smmu_map_peer_reg(
+			dma_ep_misc_pa,
+			map,
+			IPA_SMMU_CB_UC);
+		if (result) {
+			IPAERR(
+				"failed to %s dma_ep_misc reg %d\n",
+				map ? "map" : "unmap",
+				result);
+			goto fail_dma_ep_misc;
+		}
+	}
+
+	/* now send the wigig hw base address to uC */
+	if (init) {
+		struct IpaHwPeripheralInitCmdData_t *cmd_data;
+
+		cmd.size = sizeof(*cmd_data);
+		cmd.base = dma_alloc_coherent(ipa3_ctx->uc_pdev, cmd.size,
+			&cmd.phys_base, GFP_KERNEL);
+		if (cmd.base == NULL) {
+			IPAERR("fail to get DMA memory.\n");
+			result = -ENOMEM;
+			if (map)
+				goto fail_alloc;
+			return result;
+		}
+		cmd_data = (struct IpaHwPeripheralInitCmdData_t *)cmd.base;
+		cmd_data->protocol = IPA_HW_PROTOCOL_11ad;
+		cmd_data->Init_params.W11AdInit_params.periph_baddr_msb =
+			IPA_WIGIG_MSB(periph_baddr_pa);
+		cmd_data->Init_params.W11AdInit_params.periph_baddr_lsb =
+			IPA_WIGIG_LSB(periph_baddr_pa);
+		command = IPA_CPU_2_HW_CMD_PERIPHERAL_INIT;
+	} else {
+		struct IpaHwPeripheralDeinitCmdData_t *cmd_data;
+
+		cmd.size = sizeof(*cmd_data);
+		cmd.base = dma_alloc_coherent(ipa3_ctx->uc_pdev, cmd.size,
+			&cmd.phys_base, GFP_KERNEL);
+		if (cmd.base == NULL) {
+			IPAERR("fail to get DMA memory.\n");
+			result = -ENOMEM;
+			if (map)
+				goto fail_alloc;
+			return result;
+		}
+		cmd_data = (struct IpaHwPeripheralDeinitCmdData_t *)cmd.base;
+		cmd_data->protocol = IPA_HW_PROTOCOL_11ad;
+		command = IPA_CPU_2_HW_CMD_PERIPHERAL_DEINIT;
+	}
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	result = ipa3_uc_send_cmd((u32)(cmd.phys_base),
+		command,
+		IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS,
+		false, 10 * HZ);
+	if (result) {
+		IPAERR("fail to %s uc MSI config\n", init ? "init" : "deinit");
+		goto fail_command;
+	}
+
+	dma_free_coherent(ipa3_ctx->uc_pdev, cmd.size,
+		cmd.base, cmd.phys_base);
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+	IPADBG("exit\n");
+
+	return 0;
+fail_command:
+	dma_free_coherent(ipa3_ctx->uc_pdev,
+		cmd.size,
+		cmd.base, cmd.phys_base);
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+fail_alloc:
+	ipa3_smmu_map_peer_reg(dma_ep_misc_pa, !map, IPA_SMMU_CB_UC);
+fail_dma_ep_misc:
+	ipa3_smmu_map_peer_reg(int_gen_rx_pa, !map, IPA_SMMU_CB_UC);
+fail_gen_rx:
+	ipa3_smmu_map_peer_reg(int_gen_tx_pa, !map, IPA_SMMU_CB_UC);
+fail_gen_tx:
+	ipa3_smmu_map_peer_reg(pseudo_cause_pa, !map, IPA_SMMU_CB_UC);
+fail:
+	return result;
+}
+
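+/*
+ * Enable a connected WIGIG pipe: enable the data path, prime the event
+ * ring doorbell just past the ring end, start the GSI channel and, for
+ * Tx, ring the channel doorbell at the last descriptor in the ring.
+ */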
+int ipa3_enable_wigig_pipe_i(enum ipa_client_type client)
+{
+	int ipa_ep_idx, res;
+	struct ipa3_ep_context *ep;
+	struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
+	int retry_cnt = 0;
+	uint64_t val;
+
+	IPADBG("\n");
+
+	ipa_ep_idx = ipa_get_ep_mapping(client);
+	if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED ||
+		ipa_ep_idx >= IPA3_MAX_NUM_PIPES) {
+		IPAERR("fail to get ep (%d) %d.\n",
+			client, ipa_ep_idx);
+		return -EFAULT;
+	}
+
+	ep = &ipa3_ctx->ep[ipa_ep_idx];
+
+	if (!ep->valid) {
+		IPAERR("Invalid EP\n");
+		return -EFAULT;
+	}
+
+	if (ep->gsi_offload_state != IPA_WIGIG_CONNECTED) {
+		IPAERR("WIGIG channel bad state 0x%X\n",
+			ep->gsi_offload_state);
+		return -EFAULT;
+	}
+
+	IPA_ACTIVE_CLIENTS_INC_EP(client);
+
+	res = ipa3_enable_data_path(ipa_ep_idx);
+	if (res)
+		goto fail_enable_datapath;
+
+	memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
+	ipa3_cfg_ep_ctrl(ipa_ep_idx, &ep_cfg_ctrl);
+
+	/* ring the event db (outside the ring boundary) */
+	val = ep->gsi_mem_info.evt_ring_base_addr +
+		ep->gsi_mem_info.evt_ring_len;
+	res = gsi_ring_evt_ring_db(ep->gsi_evt_ring_hdl, val);
+	if (res) {
+		IPAERR(
+			"fail to ring evt ring db %d. hdl=%lu wp=0x%llx\n"
+			, res, ep->gsi_evt_ring_hdl,
+			(unsigned long long)val);
+		res = -EFAULT;
+		goto fail_ring_evt;
+	}
+
+	res = gsi_start_channel(ep->gsi_chan_hdl);
+	if (res != GSI_STATUS_SUCCESS) {
+		IPAERR("gsi_start_channel failed %d\n", res);
+		WARN_ON(1);
+		res = -EFAULT;
+		goto fail_gsi_start;
+	}
+
+	/* for TX we have to ring the channel db (last desc in the ring) */
+	if (client != IPA_CLIENT_WIGIG_PROD) {
+		val = ep->gsi_mem_info.chan_ring_base_addr +
+			ep->gsi_mem_info.chan_ring_len -
+			IPA_WIGIG_DESC_RING_EL_SIZE;
+
+		res = gsi_ring_ch_ring_db(ep->gsi_chan_hdl, val);
+		if (res) {
+			IPAERR(
+				"fail to ring channel db %d. hdl=%lu wp=0x%llx\n"
+				, res, ep->gsi_chan_hdl,
+				(unsigned long long)val);
+			res = -EFAULT;
+			goto fail_ring_ch;
+		}
+	}
+
+	ep->gsi_offload_state |= IPA_WIGIG_ENABLED;
+
+	IPADBG("exit\n");
+
+	return 0;
+
+fail_ring_ch:
+	res = ipa3_stop_gsi_channel(ipa_ep_idx);
+	if (res != 0 && res != -GSI_STATUS_AGAIN &&
+		res != -GSI_STATUS_TIMED_OUT) {
+		IPAERR("failed to stop channel res = %d\n", res);
+	} else if (res == -GSI_STATUS_AGAIN) {
+		IPADBG("GSI stop channel failed retry cnt = %d\n",
+			retry_cnt);
+		retry_cnt++;
+		if (retry_cnt < GSI_STOP_MAX_RETRY_CNT)
+			goto fail_ring_ch;
+	} else {
+		IPADBG("GSI channel %ld STOP\n", ep->gsi_chan_hdl);
+	}
+	res = -EFAULT;
+fail_gsi_start:
+fail_ring_evt:
+	ipa3_disable_data_path(ipa_ep_idx);
+fail_enable_datapath:
+	IPA_ACTIVE_CLIENTS_DEC_EP(client);
+	return res;
+}
+
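+/*
+ * Disable an enabled WIGIG pipe: force-clear in-flight traffic (falling
+ * back to removing the endpoint delay if force clear fails, e.g. on modem
+ * SSR), stop and reset the GSI channel with bounded retries, disable the
+ * data path and re-apply delay on producer pipes.
+ */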
+int ipa3_disable_wigig_pipe_i(enum ipa_client_type client)
+{
+	int ipa_ep_idx, res;
+	struct ipa3_ep_context *ep;
+	struct ipahal_ep_cfg_ctrl_scnd ep_ctrl_scnd = { 0 };
+	struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
+	bool disable_force_clear = false;
+	u32 source_pipe_bitmask = 0;
+	int retry_cnt = 0;
+
+	IPADBG("\n");
+
+	ipa_ep_idx = ipa_get_ep_mapping(client);
+	if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED ||
+		ipa_ep_idx >= IPA3_MAX_NUM_PIPES) {
+		IPAERR("fail to get ep (%d) %d.\n",
+			client, ipa_ep_idx);
+		return -EFAULT;
+	}
+
+	ep = &ipa3_ctx->ep[ipa_ep_idx];
+
+	if (!ep->valid) {
+		IPAERR("Invalid EP\n");
+		return -EFAULT;
+	}
+
+	if (ep->gsi_offload_state !=
+		(IPA_WIGIG_CONNECTED | IPA_WIGIG_ENABLED)) {
+		IPAERR("WIGIG channel bad state 0x%X\n",
+			ep->gsi_offload_state);
+		return -EFAULT;
+	}
+
+	IPADBG("pipe %d\n", ipa_ep_idx);
+	source_pipe_bitmask = 1 << ipa_ep_idx;
+	res = ipa3_enable_force_clear(ipa_ep_idx,
+		false, source_pipe_bitmask);
+	if (res) {
+		/*
+		 * assuming here modem SSR, AP can remove
+		 * the delay in this case
+		 */
+		IPAERR("failed to force clear %d\n", res);
+		IPAERR("remove delay from SCND reg\n");
+		ep_ctrl_scnd.endp_delay = false;
+		ipahal_write_reg_n_fields(
+			IPA_ENDP_INIT_CTRL_SCND_n, ipa_ep_idx,
+			&ep_ctrl_scnd);
+	} else {
+		disable_force_clear = true;
+	}
+retry_gsi_stop:
+	res = ipa3_stop_gsi_channel(ipa_ep_idx);
+	if (res != 0 && res != -GSI_STATUS_AGAIN &&
+		res != -GSI_STATUS_TIMED_OUT) {
+		IPAERR("failed to stop channel res = %d\n", res);
+		goto fail_stop_channel;
+	} else if (res == -GSI_STATUS_AGAIN) {
+		IPADBG("GSI stop channel failed retry cnt = %d\n",
+			retry_cnt);
+		retry_cnt++;
+		if (retry_cnt >= GSI_STOP_MAX_RETRY_CNT)
+			goto fail_stop_channel;
+		goto retry_gsi_stop;
+	} else {
+		IPADBG("GSI channel %ld STOP\n", ep->gsi_chan_hdl);
+	}
+
+	res = ipa3_reset_gsi_channel(ipa_ep_idx);
+	if (res != GSI_STATUS_SUCCESS) {
+		IPAERR("Failed to reset chan: %d.\n", res);
+		goto fail_stop_channel;
+	}
+
+	if (disable_force_clear)
+		ipa3_disable_force_clear(ipa_ep_idx);
+
+	res = ipa3_disable_data_path(ipa_ep_idx);
+	if (res) {
+		WARN_ON(1);
+		return res;
+	}
+
+	/* Set the delay after disabling IPA Producer pipe */
+	if (IPA_CLIENT_IS_PROD(ep->client)) {
+		memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
+		ep_cfg_ctrl.ipa_ep_delay = true;
+		ipa3_cfg_ep_ctrl(ipa_ep_idx, &ep_cfg_ctrl);
+	}
+
+	ep->gsi_offload_state &= ~IPA_WIGIG_ENABLED;
+
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(ipa_ep_idx));
+	IPADBG("exit\n");
+	return 0;
+
+fail_stop_channel:
+	ipa_assert();
+	return res;
+}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
index 120218a..64482a1 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
@@ -2383,8 +2383,8 @@ static void ipareg_parse_coal_qmap_cfg(enum ipahal_reg_name reg,
  * @parse - CB to parse register value to abstracted structure
  * @offset - register offset relative to base address
  * @n_ofst - N parameterized register sub-offset
- * @n_start - starting n for n_registers
- * @n_end - ending n for n_registers
+ * @n_start - starting n for n_registers used for printing
+ * @n_end - ending n for n_registers used for printing
  * @en_print - enable this register to be printed when the device crashes
  */
 struct ipahal_reg_obj {
@@ -2685,16 +2685,16 @@ static struct ipahal_reg_obj ipahal_reg_objs[IPA_HW_MAX][IPA_REG_MAX] = {
 	/* IPAv4.0 */
 	[IPA_HW_v4_0][IPA_SUSPEND_IRQ_INFO_EE_n] = {
 		ipareg_construct_dummy, ipareg_parse_dummy,
-		0x00003030, 0x1000, 0, 1, 1},
+		0x00003030, 0x1000, 0, 0, 1},
 	[IPA_HW_v4_0][IPA_SUSPEND_IRQ_EN_EE_n] = {
 		ipareg_construct_dummy, ipareg_parse_dummy,
-		0x00003034, 0x1000, 0, 1, 1},
+		0x00003034, 0x1000, 0, 0, 1},
 	[IPA_HW_v4_0][IPA_SUSPEND_IRQ_CLR_EE_n] = {
 		ipareg_construct_dummy, ipareg_parse_dummy,
-		0x00003038, 0x1000, 0, 1, 1},
+		0x00003038, 0x1000, 0, 0, 1},
 	[IPA_HW_v4_0][IPA_IRQ_EN_EE_n] = {
 		ipareg_construct_dummy, ipareg_parse_dummy,
-		0x0000300c, 0x1000, 0, 1, 1},
+		0x0000300c, 0x1000, 0, 0, 1},
 	[IPA_HW_v4_0][IPA_TAG_TIMER] = {
 		ipareg_construct_dummy, ipareg_parse_dummy,
 		0x00000060, 0, 0, 0, 1},
@@ -3170,7 +3170,7 @@ void ipahal_print_all_regs(bool print_to_dmesg)
 
 		j = reg->n_start;
 
-		if (j == reg->n_end) {
+		if (j == reg->n_end && (reg->n_ofst == 0)) {
 			if (print_to_dmesg)
 				IPAHAL_DBG_REG("%s=0x%x\n",
 					ipahal_reg_name_str(i),
diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c
index 20c4cc5..a9ecb0f 100644
--- a/drivers/power/supply/power_supply_sysfs.c
+++ b/drivers/power/supply/power_supply_sysfs.c
@@ -446,6 +446,7 @@ static struct device_attribute power_supply_attrs[] = {
 	POWER_SUPPLY_ATTR(fcc_stepper_enable),
 	POWER_SUPPLY_ATTR(toggle_stat),
 	POWER_SUPPLY_ATTR(main_fcc_max),
+	POWER_SUPPLY_ATTR(fg_reset),
 	/* Charge pump properties */
 	POWER_SUPPLY_ATTR(cp_status1),
 	POWER_SUPPLY_ATTR(cp_status2),
diff --git a/drivers/power/supply/qcom/qpnp-fg-gen4.c b/drivers/power/supply/qcom/qpnp-fg-gen4.c
index 9b96f78..586d18e 100644
--- a/drivers/power/supply/qcom/qpnp-fg-gen4.c
+++ b/drivers/power/supply/qcom/qpnp-fg-gen4.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2018 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
  */
 
 #define pr_fmt(fmt)	"FG: %s: " fmt, __func__
@@ -1027,7 +1027,7 @@ static int fg_gen4_store_count(void *data, u16 *buf, int id, int length)
 	rc = fg_sram_write(&chip->fg, CYCLE_COUNT_WORD + id, CYCLE_COUNT_OFFSET,
 			(u8 *)buf, length, FG_IMA_DEFAULT);
 	if (rc < 0)
-		pr_err("failed to write bucket %d rc=%d\n", rc);
+		pr_err("failed to write bucket %d rc=%d\n", id, rc);
 
 	return rc;
 }
diff --git a/drivers/power/supply/qcom/smb1390-charger-psy.c b/drivers/power/supply/qcom/smb1390-charger-psy.c
index 7ab8dda..279a3e2 100644
--- a/drivers/power/supply/qcom/smb1390-charger-psy.c
+++ b/drivers/power/supply/qcom/smb1390-charger-psy.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved.
  */
 
 #define pr_fmt(fmt) "SMB1390: %s: " fmt, __func__
@@ -771,7 +771,7 @@ static int smb1390_parse_dt(struct smb1390 *chip)
 			rc = PTR_ERR(chip->iio.die_temp_chan);
 			if (rc != -EPROBE_DEFER)
 				dev_err(chip->dev,
-					"cp_die_temp channel unavailable %ld\n",
+					"cp_die_temp channel unavailable %d\n",
 					rc);
 			chip->iio.die_temp_chan = NULL;
 			return rc;
diff --git a/drivers/power/supply/qcom/smb5-lib.c b/drivers/power/supply/qcom/smb5-lib.c
index f751e87..28e35b8 100644
--- a/drivers/power/supply/qcom/smb5-lib.c
+++ b/drivers/power/supply/qcom/smb5-lib.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2018 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
  */
 
 #include <linux/device.h>
@@ -4952,7 +4952,7 @@ int smblib_set_prop_pr_swap_in_progress(struct smb_charger *chg,
 				const union power_supply_propval *val)
 {
 	int rc;
-	u8 stat, orientation;
+	u8 stat = 0, orientation;
 
 	chg->pr_swap_in_progress = val->intval;
 
diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c
index fd77e46..70a006ba 100644
--- a/drivers/s390/cio/vfio_ccw_cp.c
+++ b/drivers/s390/cio/vfio_ccw_cp.c
@@ -387,8 +387,10 @@ static int ccwchain_calc_length(u64 iova, struct channel_program *cp)
 		 * orb specified one of the unsupported formats, we defer
 		 * checking for IDAWs in unsupported formats to here.
 		 */
-		if ((!cp->orb.cmd.c64 || cp->orb.cmd.i2k) && ccw_is_idal(ccw))
+		if ((!cp->orb.cmd.c64 || cp->orb.cmd.i2k) && ccw_is_idal(ccw)) {
+			kfree(p);
 			return -EOPNOTSUPP;
+		}
 
 		if ((!ccw_is_chain(ccw)) && (!ccw_is_tic(ccw)))
 			break;
@@ -528,7 +530,7 @@ static int ccwchain_fetch_direct(struct ccwchain *chain,
 
 	ret = pfn_array_alloc_pin(pat->pat_pa, cp->mdev, ccw->cda, ccw->count);
 	if (ret < 0)
-		goto out_init;
+		goto out_unpin;
 
 	/* Translate this direct ccw to a idal ccw. */
 	idaws = kcalloc(ret, sizeof(*idaws), GFP_DMA | GFP_KERNEL);
diff --git a/drivers/soc/qcom/fsa4480-i2c.c b/drivers/soc/qcom/fsa4480-i2c.c
index 482bd05..1444136 100644
--- a/drivers/soc/qcom/fsa4480-i2c.c
+++ b/drivers/soc/qcom/fsa4480-i2c.c
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/kernel.h>
@@ -7,6 +7,7 @@
 #include <linux/power_supply.h>
 #include <linux/regmap.h>
 #include <linux/i2c.h>
+#include <linux/mutex.h>
 #include <linux/soc/qcom/fsa4480-i2c.h>
 
 #define FSA4480_I2C_NAME	"fsa4480-driver"
@@ -33,6 +34,7 @@ struct fsa4480_priv {
 	atomic_t usbc_mode;
 	struct work_struct usbc_analog_work;
 	struct blocking_notifier_head fsa4480_notifier;
+	struct mutex notification_lock;
 };
 
 struct fsa4480_reg_val {
@@ -115,6 +117,7 @@ static int fsa4480_usbc_event_changed(struct notifier_block *nb,
 
 		dev_dbg(dev, "%s: queueing usbc_analog_work\n",
 			__func__);
+		pm_stay_awake(fsa_priv->dev);
 		schedule_work(&fsa_priv->usbc_analog_work);
 		break;
 	default:
@@ -123,6 +126,51 @@ static int fsa4480_usbc_event_changed(struct notifier_block *nb,
 	return ret;
 }
 
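+/*
+ * Re-read the Type-C mode under the notification lock and drive the audio
+ * switches accordingly, so a racing mode change cannot interleave with the
+ * notifier-chain callbacks.
+ */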
+static int fsa4480_usbc_analog_setup_switches(struct fsa4480_priv *fsa_priv)
+{
+	int rc = 0;
+	union power_supply_propval mode;
+	struct device *dev;
+
+	if (!fsa_priv)
+		return -EINVAL;
+	dev = fsa_priv->dev;
+	if (!dev)
+		return -EINVAL;
+
+	mutex_lock(&fsa_priv->notification_lock);
+	/* get latest mode again within locked context */
+	rc = power_supply_get_property(fsa_priv->usb_psy,
+			POWER_SUPPLY_PROP_TYPEC_MODE, &mode);
+	if (rc) {
+		dev_err(dev, "%s: Unable to read USB TYPEC_MODE: %d\n",
+			__func__, rc);
+		goto done;
+	}
+	dev_dbg(dev, "%s: setting GPIOs active = %d\n",
+		__func__, mode.intval != POWER_SUPPLY_TYPEC_NONE);
+
+	if (mode.intval != POWER_SUPPLY_TYPEC_NONE) {
+		/* activate switches */
+		fsa4480_usbc_update_settings(fsa_priv, 0x00, 0x9F);
+
+		/* notify call chain on event */
+		blocking_notifier_call_chain(&fsa_priv->fsa4480_notifier,
+		POWER_SUPPLY_TYPEC_SINK_AUDIO_ADAPTER, NULL);
+	} else {
+		/* notify call chain on event */
+		blocking_notifier_call_chain(&fsa_priv->fsa4480_notifier,
+				POWER_SUPPLY_TYPEC_NONE, NULL);
+
+		/* deactivate switches */
+		fsa4480_usbc_update_settings(fsa_priv, 0x18, 0x98);
+	}
+
+done:
+	mutex_unlock(&fsa_priv->notification_lock);
+	return rc;
+}
+
 /*
  * fsa4480_reg_notifier - register notifier block with fsa driver
  *
@@ -156,9 +204,7 @@ int fsa4480_reg_notifier(struct notifier_block *nb,
 	 */
 	dev_dbg(fsa_priv->dev, "%s: verify if USB adapter is already inserted\n",
 		__func__);
-	rc = fsa4480_usbc_event_changed(&fsa_priv->psy_nb,
-					     PSY_EVENT_PROP_CHANGED,
-					     fsa_priv->usb_psy);
+	rc = fsa4480_usbc_analog_setup_switches(fsa_priv);
 
 	return rc;
 }
@@ -185,7 +231,6 @@ int fsa4480_unreg_notifier(struct notifier_block *nb,
 	if (!fsa_priv)
 		return -EINVAL;
 
-	atomic_set(&(fsa_priv->usbc_mode), 0);
 	fsa4480_usbc_update_settings(fsa_priv, 0x18, 0x98);
 	return blocking_notifier_chain_unregister
 					(&fsa_priv->fsa4480_notifier, nb);
@@ -241,10 +286,10 @@ int fsa4480_switch_event(struct device_node *node,
 		fsa4480_usbc_update_settings(fsa_priv, switch_control, 0x9F);
 		break;
 	case FSA_USBC_ORIENTATION_CC1:
-		fsa4480_usbc_update_settings(fsa_priv, 0x00, 0xE0);
+		fsa4480_usbc_update_settings(fsa_priv, 0x18, 0xF8);
 		return fsa4480_validate_display_port_settings(fsa_priv);
 	case FSA_USBC_ORIENTATION_CC2:
-		fsa4480_usbc_update_settings(fsa_priv, 0x60, 0xE0);
+		fsa4480_usbc_update_settings(fsa_priv, 0x78, 0xF8);
 		return fsa4480_validate_display_port_settings(fsa_priv);
 	case FSA_USBC_DISPLAYPORT_DISCONNECTED:
 		fsa4480_usbc_update_settings(fsa_priv, 0x18, 0x98);
@@ -257,31 +302,6 @@ int fsa4480_switch_event(struct device_node *node,
 }
 EXPORT_SYMBOL(fsa4480_switch_event);
 
-static int fsa4480_usbc_analog_setup_switches
-			(struct fsa4480_priv *fsa_priv, bool active)
-{
-	dev_dbg(fsa_priv->dev, "%s: setting GPIOs active = %d\n",
-		__func__, active);
-
-	if (active) {
-		/* activate switches */
-		fsa4480_usbc_update_settings(fsa_priv, 0x00, 0x9F);
-
-		/* notify call chain on event */
-		blocking_notifier_call_chain(&fsa_priv->fsa4480_notifier,
-		POWER_SUPPLY_TYPEC_SINK_AUDIO_ADAPTER, NULL);
-	} else {
-		/* notify call chain on event */
-		blocking_notifier_call_chain(&fsa_priv->fsa4480_notifier,
-				POWER_SUPPLY_TYPEC_NONE, NULL);
-
-		/* deactivate switches */
-		fsa4480_usbc_update_settings(fsa_priv, 0x18, 0x98);
-	}
-
-	return 0;
-}
-
 static void fsa4480_usbc_analog_work_fn(struct work_struct *work)
 {
 	struct fsa4480_priv *fsa_priv =
@@ -291,8 +311,8 @@ static void fsa4480_usbc_analog_work_fn(struct work_struct *work)
 		pr_err("%s: fsa container invalid\n", __func__);
 		return;
 	}
-	fsa4480_usbc_analog_setup_switches(fsa_priv,
-		atomic_read(&(fsa_priv->usbc_mode)) != POWER_SUPPLY_TYPEC_NONE);
+	fsa4480_usbc_analog_setup_switches(fsa_priv);
+	pm_relax(fsa_priv->dev);
 }
 
 static void fsa4480_update_reg_defaults(struct regmap *regmap)
@@ -349,6 +369,7 @@ static int fsa4480_probe(struct i2c_client *i2c,
 		goto err_supply;
 	}
 
+	mutex_init(&fsa_priv->notification_lock);
 	i2c_set_clientdata(i2c, fsa_priv);
 
 	INIT_WORK(&fsa_priv->usbc_analog_work,
@@ -377,10 +398,12 @@ static int fsa4480_remove(struct i2c_client *i2c)
 		return -EINVAL;
 
 	fsa4480_usbc_update_settings(fsa_priv, 0x18, 0x98);
-
+	cancel_work(&fsa_priv->usbc_analog_work);
+	pm_relax(fsa_priv->dev);
 	/* deregister from PMI */
 	power_supply_unreg_notifier(&fsa_priv->psy_nb);
 	power_supply_put(fsa_priv->usb_psy);
+	mutex_destroy(&fsa_priv->notification_lock);
 	dev_set_drvdata(&i2c->dev, NULL);
 
 	return 0;
diff --git a/drivers/soc/qcom/memshare/msm_memshare.c b/drivers/soc/qcom/memshare/msm_memshare.c
index 6ab2e2a..6a41d93 100644
--- a/drivers/soc/qcom/memshare/msm_memshare.c
+++ b/drivers/soc/qcom/memshare/msm_memshare.c
@@ -73,7 +73,7 @@ static int mem_share_configure_ramdump(int client)
 		clnt = "DIAG";
 		break;
 	default:
-		pr_err("memshare: no memshare clients registered\n");
+		dev_err(memsh_child->dev, "memshare: no memshare clients registered\n");
 		return -EINVAL;
 	}
 
@@ -84,12 +84,13 @@ static int mem_share_configure_ramdump(int client)
 			create_ramdump_device(client_name,
 				memshare_dev[client]);
 	} else {
-		pr_err("memshare:%s: invalid memshare device\n", __func__);
+		dev_err(memsh_child->dev,
+			"memshare: invalid memshare device for creating ramdump device\n");
 		return -ENODEV;
 	}
 	if (IS_ERR_OR_NULL(memshare_ramdump_dev[client])) {
-		pr_err("memshare: %s: Unable to create memshare ramdump device\n",
-				__func__);
+		dev_err(memsh_child->dev,
+			"memshare: unable to create memshare ramdump device\n");
 		memshare_ramdump_dev[client] = NULL;
 		return -ENOMEM;
 	}
@@ -110,7 +111,9 @@ static int check_client(int client_id, int proc, int request)
 		}
 	}
 	if ((found == DHMS_MEM_CLIENT_INVALID) && !request) {
-		pr_debug("memshare: No registered client, adding a new client\n");
+		dev_dbg(memsh_child->dev,
+			"memshare: No registered client for the client_id: %d, adding a new client\n",
+			client_id);
 		/* Add a new client */
 		for (i = 0; i < MAX_CLIENTS; i++) {
 			if (memblock[i].client_id == DHMS_MEM_CLIENT_INVALID) {
@@ -123,8 +126,9 @@ static int check_client(int client_id, int proc, int request)
 				if (!memblock[i].file_created) {
 					rc = mem_share_configure_ramdump(i);
 					if (rc)
-						pr_err("memshare: %s, Cannot create ramdump for client: %d\n",
-							__func__, client_id);
+						dev_err(memsh_child->dev,
+							"memshare_check_client: cannot create ramdump for client with id: %d\n",
+							client_id);
 					else
 						memblock[i].file_created = 1;
 				}
@@ -220,12 +224,14 @@ static int mem_share_do_ramdump(void)
 			client_name = "DIAG";
 			break;
 		default:
-			pr_err("memshare: no memshare clients registered\n");
+			dev_err(memsh_child->dev,
+				"memshare: no memshare clients registered for client_id: %d\n",
+				i);
 			return -EINVAL;
 		}
 
 		if (!memblock[i].allotted) {
-			pr_err("memshare:%s memblock is not allotted\n",
+			dev_err(memsh_child->dev, "memshare: %s: memblock is not allotted\n",
 			client_name);
 			continue;
 		}
@@ -233,8 +239,9 @@ static int mem_share_do_ramdump(void)
 		if (memblock[i].hyp_mapping &&
 			memblock[i].peripheral ==
 			DHMS_MEM_PROC_MPSS_V01) {
-			pr_debug("memshare: hypervisor unmapping  for client id: %d\n",
-				memblock[i].client_id);
+			dev_dbg(memsh_child->dev,
+				"memshare: %s: hypervisor unmapping for client before elf dump\n",
+				client_name);
 			if (memblock[i].alloc_request)
 				continue;
 			ret = hyp_assign_phys(
@@ -250,8 +257,9 @@ static int mem_share_do_ramdump(void)
 				 * earlier but during unmap
 				 * it lead to failure.
 				 */
-				pr_err("memshare: %s, failed to map the region to APPS\n",
-					__func__);
+				dev_err(memsh_child->dev,
+					"memshare: %s: failed to map the memory region to APPS\n",
+					client_name);
 			} else {
 				memblock[i].hyp_mapping = 0;
 			}
@@ -266,14 +274,16 @@ static int mem_share_do_ramdump(void)
 		ramdump_segments_tmp[0].size = memblock[i].size;
 		ramdump_segments_tmp[0].address = memblock[i].phy_addr;
 
-		pr_debug("memshare: %s:%s client:id: %d:size = %d\n",
-		__func__, client_name, i, memblock[i].size);
+		dev_dbg(memsh_child->dev, "memshare: %s: Begin elf dump for size = %d\n",
+			client_name, memblock[i].size);
 
 		ret = do_elf_ramdump(memshare_ramdump_dev[i],
 					ramdump_segments_tmp, 1);
 		kfree(ramdump_segments_tmp);
 		if (ret < 0) {
-			pr_err("memshare: Unable to dump: %d\n", ret);
+			dev_err(memsh_child->dev,
+				"memshare: %s: Unable to elf dump with failure: %d\n",
+				client_name, ret);
 			return ret;
 		}
 	}
@@ -312,23 +322,22 @@ static int modem_notifier_cb(struct notifier_block *this, unsigned long code,
 		}
 
 		if (notifdata->enable_ramdump && ramdump_event) {
-			pr_debug("memshare: %s, Ramdump collection is enabled\n",
-					__func__);
+			dev_info(memsh_child->dev, "memshare: Ramdump collection is enabled\n");
 			ret = mem_share_do_ramdump();
 			if (ret)
-				pr_err("memshare: Ramdump collection failed\n");
+				dev_err(memsh_child->dev, "memshare: Ramdump collection failed\n");
 			ramdump_event = false;
 		}
 		break;
 
 	case SUBSYS_AFTER_POWERUP:
-		pr_debug("memshare: Modem has booted up\n");
+		dev_dbg(memsh_child->dev, "memshare: Modem has booted up\n");
 		for (i = 0; i < MAX_CLIENTS; i++) {
 			size = memblock[i].size;
 			if (memblock[i].free_memory > 0 &&
 					bootup_request >= 2) {
 				memblock[i].free_memory -= 1;
-				pr_debug("memshare: free_memory count: %d for client id: %d\n",
+				dev_dbg(memsh_child->dev, "memshare: free_memory count: %d for client id: %d\n",
 					memblock[i].free_memory,
 					memblock[i].client_id);
 			}
@@ -340,7 +349,8 @@ static int modem_notifier_cb(struct notifier_block *this, unsigned long code,
 				!memblock[i].client_request &&
 				memblock[i].allotted &&
 				!memblock[i].alloc_request) {
-				pr_debug("memshare: hypervisor unmapping  for client id: %d\n",
+				dev_info(memsh_child->dev,
+					"memshare: hypervisor unmapping for allocated memory with client id: %d\n",
 					memblock[i].client_id);
 				if (memblock[i].hyp_mapping) {
 					ret = hyp_assign_phys(
@@ -357,8 +367,9 @@ static int modem_notifier_cb(struct notifier_block *this, unsigned long code,
 						 * earlier but during unmap
 						 * it lead to failure.
 						 */
-						pr_err("memshare: %s, failed to unmap the region\n",
-							__func__);
+						dev_err(memsh_child->dev,
+							"memshare: failed to hypervisor unmap the memory region for client id: %d\n",
+							memblock[i].client_id);
 					} else {
 						memblock[i].hyp_mapping = 0;
 					}
@@ -401,7 +412,8 @@ static void shared_hyp_mapping(int client_id)
 	int dest_perms[1] = {PERM_READ|PERM_WRITE};
 
 	if (client_id == DHMS_MEM_CLIENT_INVALID) {
-		pr_err("memshare: %s, Invalid Client\n", __func__);
+		dev_err(memsh_child->dev,
+			"memshare: hypervisor mapping failure for invalid client\n");
 		return;
 	}
 
@@ -411,7 +423,7 @@ static void shared_hyp_mapping(int client_id)
 			dest_perms, 1);
 
 	if (ret != 0) {
-		pr_err("memshare: hyp_assign_phys failed size=%u err=%d\n",
+		dev_err(memsh_child->dev, "memshare: hyp_assign_phys failed size=%u err=%d\n",
 				memblock[client_id].size, ret);
 		return;
 	}
@@ -429,8 +441,9 @@ static void handle_alloc_generic_req(struct qmi_handle *handle,
 
 	mutex_lock(&memsh_drv->mem_share);
 	alloc_req = (struct mem_alloc_generic_req_msg_v01 *)decoded_msg;
-	pr_debug("memshare: alloc request client id: %d proc _id: %d\n",
-			alloc_req->client_id, alloc_req->proc_id);
+	dev_info(memsh_child->dev,
+		"memshare_alloc: memory alloc request received for client id: %d, proc_id: %d, request size: %d\n",
+		alloc_req->client_id, alloc_req->proc_id, alloc_req->num_bytes);
 	alloc_resp = kzalloc(sizeof(*alloc_resp),
 					GFP_KERNEL);
 	if (!alloc_resp) {
@@ -443,9 +456,9 @@ static void handle_alloc_generic_req(struct qmi_handle *handle,
 								CHECK);
 
 	if (client_id >= MAX_CLIENTS) {
-		pr_err("memshare: %s client not found, requested client: %d, proc_id: %d\n",
-				__func__, alloc_req->client_id,
-				alloc_req->proc_id);
+		dev_err(memsh_child->dev,
+			"memshare_alloc: client not found, requested client: %d, proc_id: %d\n",
+			alloc_req->client_id, alloc_req->proc_id);
 		kfree(alloc_resp);
 		alloc_resp = NULL;
 		mutex_unlock(&memsh_drv->mem_share);
@@ -460,8 +473,9 @@ static void handle_alloc_generic_req(struct qmi_handle *handle,
 		rc = memshare_alloc(memsh_drv->dev, size,
 					&memblock[client_id]);
 		if (rc) {
-			pr_err("memshare: %s,Unable to allocate memory for requested client\n",
-							__func__);
+			dev_err(memsh_child->dev,
+				"memshare_alloc: unable to allocate memory of size: %d for requested client\n",
+				size);
 			resp = 1;
 		}
 		if (!resp) {
@@ -471,9 +485,9 @@ static void handle_alloc_generic_req(struct qmi_handle *handle,
 			memblock[client_id].peripheral = alloc_req->proc_id;
 		}
 	}
-	pr_debug("memshare: In %s, free memory count for client id: %d = %d\n",
-		__func__, memblock[client_id].client_id,
-		memblock[client_id].free_memory);
+	dev_dbg(memsh_child->dev,
+		"memshare_alloc: free memory count for client id: %d = %d\n",
+		memblock[client_id].client_id, memblock[client_id].free_memory);
 
 	memblock[client_id].sequence_id = alloc_req->sequence_id;
 	memblock[client_id].alloc_request = 1;
@@ -487,17 +501,20 @@ static void handle_alloc_generic_req(struct qmi_handle *handle,
 		memblock[client_id].allotted)
 		shared_hyp_mapping(client_id);
 	mutex_unlock(&memsh_drv->mem_share);
-	pr_debug("memshare: alloc_resp.num_bytes :%d, alloc_resp.resp.result :%lx\n",
-			  alloc_resp->dhms_mem_alloc_addr_info[0].num_bytes,
-			  (unsigned long)alloc_resp->resp.result);
+	dev_info(memsh_child->dev,
+		"memshare_alloc: client_id: %d, alloc_resp.num_bytes: %d, alloc_resp.resp.result: %lx\n",
+		alloc_req->client_id,
+		alloc_resp->dhms_mem_alloc_addr_info[0].num_bytes,
+		(unsigned long)alloc_resp->resp.result);
 
 	rc = qmi_send_response(mem_share_svc_handle, sq, txn,
 			  MEM_ALLOC_GENERIC_RESP_MSG_V01,
 			  sizeof(struct mem_alloc_generic_resp_msg_v01),
 			  mem_alloc_generic_resp_msg_data_v01_ei, alloc_resp);
 	if (rc < 0)
-		pr_err("memshare: %s, Error sending the alloc response: %d\n",
-							__func__, rc);
+		dev_err(memsh_child->dev,
+		"memshare_alloc: Error sending the alloc response: %d\n",
+		rc);
 
 	kfree(alloc_resp);
 	alloc_resp = NULL;
@@ -516,22 +533,22 @@ static void handle_free_generic_req(struct qmi_handle *handle,
 
 	mutex_lock(&memsh_drv->mem_free);
 	free_req = (struct mem_free_generic_req_msg_v01 *)decoded_msg;
-	pr_debug("memshare: %s: Received Free Request\n", __func__);
 	memset(&free_resp, 0, sizeof(free_resp));
 	free_resp.resp.error = QMI_ERR_INTERNAL_V01;
 	free_resp.resp.result = QMI_RESULT_FAILURE_V01;
-	pr_debug("memshare: Client id: %d proc id: %d\n", free_req->client_id,
-				free_req->proc_id);
+	dev_info(memsh_child->dev,
+		"memshare_free: handling memory free request with client id: %d, proc_id: %d\n",
+		free_req->client_id, free_req->proc_id);
 	client_id = check_client(free_req->client_id, free_req->proc_id, FREE);
 	if (client_id == DHMS_MEM_CLIENT_INVALID) {
-		pr_err("memshare: %s, Invalid client request to free memory\n",
-					__func__);
+		dev_err(memsh_child->dev, "memshare_free: invalid client request to free memory\n");
 		flag = 1;
 	} else if (!memblock[client_id].guarantee &&
 				!memblock[client_id].client_request &&
 				memblock[client_id].allotted) {
-		pr_debug("memshare: %s:client_id:%d - size: %d\n",
-				__func__, client_id, memblock[client_id].size);
+		dev_dbg(memsh_child->dev,
+			"memshare_free: hypervisor unmapping for client_id:%d - size: %d\n",
+			client_id, memblock[client_id].size);
 		ret = hyp_assign_phys(memblock[client_id].phy_addr,
 				memblock[client_id].size, source_vmlist, 1,
 				dest_vmids, dest_perms, 1);
@@ -540,8 +557,9 @@ static void handle_free_generic_req(struct qmi_handle *handle,
 		 * This is an error case as hyp mapping was successful
 		 * earlier but during unmap it lead to failure.
 		 */
-			pr_err("memshare: %s, failed to unmap the region for client id:%d\n",
-				__func__, client_id);
+			dev_err(memsh_child->dev,
+				"memshare_free: failed to unmap the region for client id:%d\n",
+				client_id);
 		}
 		size = memblock[client_id].size;
 		if (memblock[client_id].guard_band) {
@@ -558,8 +576,9 @@ static void handle_free_generic_req(struct qmi_handle *handle,
 			attrs);
 		free_client(client_id);
 	} else {
-		pr_err("memshare: %s, Request came for a guaranteed client (client_id: %d) cannot free up the memory\n",
-						__func__, client_id);
+		dev_err(memsh_child->dev,
+			"memshare_free: cannot free the memory for a guaranteed client (client_id: %d)\n",
+			client_id);
 	}
 
 	if (flag) {
@@ -576,8 +595,9 @@ static void handle_free_generic_req(struct qmi_handle *handle,
 			  sizeof(struct mem_free_generic_resp_msg_v01),
 			  mem_free_generic_resp_msg_data_v01_ei, &free_resp);
 	if (rc < 0)
-		pr_err("memshare: %s, Error sending the free response: %d\n",
-					__func__, rc);
+		dev_err(memsh_child->dev,
+		"memshare_free: error sending the free response: %d\n", rc);
 }
 
 static void handle_query_size_req(struct qmi_handle *handle,
@@ -595,15 +615,16 @@ static void handle_query_size_req(struct qmi_handle *handle,
 		mutex_unlock(&memsh_drv->mem_share);
 		return;
 	}
-	pr_debug("memshare: query request client id: %d proc _id: %d\n",
+	dev_dbg(memsh_child->dev,
+		"memshare_query: query on availalbe memory size for client id: %d, proc_id: %d\n",
 		query_req->client_id, query_req->proc_id);
 	client_id = check_client(query_req->client_id, query_req->proc_id,
 								CHECK);
 
 	if (client_id >= MAX_CLIENTS) {
-		pr_err("memshare: %s client not found, requested client: %d, proc_id: %d\n",
-				__func__, query_req->client_id,
-				query_req->proc_id);
+		dev_err(memsh_child->dev,
+			"memshare_query: client not found, requested client: %d, proc_id: %d\n",
+			query_req->client_id, query_req->proc_id);
 		kfree(query_resp);
 		query_resp = NULL;
 		mutex_unlock(&memsh_drv->mem_share);
@@ -621,16 +642,17 @@ static void handle_query_size_req(struct qmi_handle *handle,
 	query_resp->resp.error = QMI_ERR_NONE_V01;
 	mutex_unlock(&memsh_drv->mem_share);
 
-	pr_debug("memshare: query_resp.size :%d, query_resp.resp.result :%lx\n",
-			  query_resp->size,
-			  (unsigned long)query_resp->resp.result);
+	dev_info(memsh_child->dev,
+		"memshare_query: client_id : %d, query_resp.size :%d, query_resp.resp.result :%lx\n",
+		query_req->client_id, query_resp->size,
+		(unsigned long)query_resp->resp.result);
 	rc = qmi_send_response(mem_share_svc_handle, sq, txn,
 			  MEM_QUERY_SIZE_RESP_MSG_V01,
 			  MEM_QUERY_MAX_MSG_LEN_V01,
 			  mem_query_size_resp_msg_data_v01_ei, query_resp);
 	if (rc < 0)
-		pr_err("memshare: %s, Error sending the query response: %d\n",
-							__func__, rc);
+		dev_err(memsh_child->dev,
+		"memshare_query: Error sending the query response: %d\n", rc);
 
 	kfree(query_resp);
 	query_resp = NULL;
@@ -673,11 +695,12 @@ int memshare_alloc(struct device *dev,
 					unsigned int block_size,
 					struct mem_blocks *pblk)
 {
-	pr_debug("memshare: %s\n", __func__);
+	dev_dbg(memsh_child->dev,
+		"memshare: allocation request for size: %d", block_size);
 
 	if (!pblk) {
-		pr_err("memshare: %s: Failed memory block allocation\n",
-			__func__);
+		dev_err(memsh_child->dev,
+			"memshare: Failed memory block allocation\n");
 		return -ENOMEM;
 	}
 
@@ -710,8 +733,8 @@ static void memshare_init_worker(struct work_struct *work)
 		sizeof(struct qmi_elem_info),
 		&server_ops, qmi_memshare_handlers);
 	if (rc < 0) {
-		pr_err("memshare: %s: Creating mem_share_svc qmi handle failed\n",
-			__func__);
+		dev_err(memsh_child->dev,
+			"memshare: Creating mem_share_svc qmi handle failed\n");
 		kfree(mem_share_svc_handle);
 		destroy_workqueue(mem_share_svc_workqueue);
 		return;
@@ -719,14 +742,14 @@ static void memshare_init_worker(struct work_struct *work)
 	rc = qmi_add_server(mem_share_svc_handle, MEM_SHARE_SERVICE_SVC_ID,
 		MEM_SHARE_SERVICE_VERS, MEM_SHARE_SERVICE_INS_ID);
 	if (rc < 0) {
-		pr_err("memshare: %s: Registering mem share svc failed %d\n",
-			__func__, rc);
+		dev_err(memsh_child->dev,
+			"memshare: Registering mem share svc failed %d\n", rc);
 		qmi_handle_release(mem_share_svc_handle);
 		kfree(mem_share_svc_handle);
 		destroy_workqueue(mem_share_svc_workqueue);
 		return;
 	}
-	pr_debug("memshare: memshare_init successful\n");
+	dev_dbg(memsh_child->dev, "memshare: memshare_init successful\n");
 }
 
 static int memshare_child_probe(struct platform_device *pdev)
@@ -749,16 +772,16 @@ static int memshare_child_probe(struct platform_device *pdev)
 	rc = of_property_read_u32(pdev->dev.of_node, "qcom,peripheral-size",
 						&size);
 	if (rc) {
-		pr_err("memshare: %s, Error reading size of clients, rc: %d\n",
-				__func__, rc);
+		dev_err(memsh_child->dev, "memshare: Error reading size of clients, rc: %d\n",
+				rc);
 		return rc;
 	}
 
 	rc = of_property_read_u32(pdev->dev.of_node, "qcom,client-id",
 						&client_id);
 	if (rc) {
-		pr_err("memshare: %s, Error reading client id, rc: %d\n",
-				__func__, rc);
+		dev_err(memsh_child->dev, "memshare: Error reading client id, rc: %d\n",
+				rc);
 		return rc;
 	}
 
@@ -777,8 +800,8 @@ static int memshare_child_probe(struct platform_device *pdev)
 	rc = of_property_read_string(pdev->dev.of_node, "label",
 						&name);
 	if (rc) {
-		pr_err("memshare: %s, Error reading peripheral info for client, rc: %d\n",
-					__func__, rc);
+		dev_err(memsh_child->dev, "memshare: Error reading peripheral info for client, rc: %d\n",
+					rc);
 		return rc;
 	}
 
@@ -802,8 +825,9 @@ static int memshare_child_probe(struct platform_device *pdev)
 				size,
 				&memblock[num_clients]);
 		if (rc) {
-			pr_err("memshare: %s, Unable to allocate memory for guaranteed clients, rc: %d\n",
-							__func__, rc);
+			dev_err(memsh_child->dev,
+				"memshare_child: Unable to allocate memory for guaranteed clients, rc: %d\n",
+				rc);
 			return rc;
 		}
 		memblock[num_clients].allotted = 1;
@@ -820,9 +844,9 @@ static int memshare_child_probe(struct platform_device *pdev)
 	if (!memblock[num_clients].file_created) {
 		rc = mem_share_configure_ramdump(num_clients);
 		if (rc)
-			pr_err("memshare: %s, cannot collect dumps for client id: %d\n",
-					__func__,
-					memblock[num_clients].client_id);
+			dev_err(memsh_child->dev,
+			"memshare_child: cannot create ramdump for client with id: %d\n",
+			memblock[num_clients].client_id);
 		else
 			memblock[num_clients].file_created = 1;
 	}
@@ -860,13 +884,13 @@ static int memshare_probe(struct platform_device *pdev)
 				&pdev->dev);
 
 	if (rc) {
-		pr_err("memshare: %s, error populating the devices\n",
-			__func__);
+		dev_err(memsh_child->dev,
+			"memshare: error populating the devices\n");
 		return rc;
 	}
 
 	subsys_notif_register_notifier("modem", &nb);
-	pr_debug("memshare: %s, Memshare inited\n", __func__);
+	dev_dbg(memsh_child->dev, "memshare: Memshare inited\n");
 
 	return 0;
 }
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 671d078..1aba268 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -533,6 +533,18 @@
 	  This driver can also be built as a module.  If so, the module
 	  will be called spi_qup.
 
+config SPI_QCOM_GENI
+	tristate "Qualcomm Technologies Inc.'s GENI based SPI controller"
+	depends on ARCH_QCOM
+	help
+	  SPI driver for Qualcomm Technologies Inc's GENI based controller.
+	  The controller can run at up to 50 MHz, supports up to 4 CS lines,
+	  programmable bits per word from 4 to 32, and the various SPI
+	  modes. It can operate in FIFO mode (SW driven IO) and DMA mode.
+
+	  This driver can also be built as a module.  If so, the module
+	  will be called spi-geni-qcom.
+
 config SPI_S3C24XX
 	tristate "Samsung S3C24XX series SPI"
 	depends on ARCH_S3C24XX
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index a90d559..8f7d544 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -75,6 +75,7 @@
 obj-$(CONFIG_SPI_PXA2XX)		+= spi-pxa2xx-platform.o
 obj-$(CONFIG_SPI_PXA2XX_PCI)		+= spi-pxa2xx-pci.o
 obj-$(CONFIG_SPI_QUP)			+= spi-qup.o
+obj-$(CONFIG_SPI_QCOM_GENI)		+= spi-geni-qcom.o
 obj-$(CONFIG_SPI_ROCKCHIP)		+= spi-rockchip.o
 obj-$(CONFIG_SPI_RB4XX)			+= spi-rb4xx.o
 obj-$(CONFIG_SPI_RSPI)			+= spi-rspi.o
diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c
new file mode 100644
index 0000000..424354f
--- /dev/null
+++ b/drivers/spi/spi-geni-qcom.c
@@ -0,0 +1,1642 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/ipc_logging.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/pm_runtime.h>
+#include <linux/qcom-geni-se.h>
+#include <linux/msm_gpi.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi-geni-qcom.h>
+#include <linux/pinctrl/consumer.h>
+
+#define SPI_NUM_CHIPSELECT	(4)
+#define SPI_XFER_TIMEOUT_MS	(250)
+#define SPI_AUTO_SUSPEND_DELAY	(250)
+/* SPI SE specific registers */
+#define SE_SPI_CPHA		(0x224)
+#define SE_SPI_LOOPBACK		(0x22C)
+#define SE_SPI_CPOL		(0x230)
+#define SE_SPI_DEMUX_OUTPUT_INV	(0x24C)
+#define SE_SPI_DEMUX_SEL	(0x250)
+#define SE_SPI_TRANS_CFG	(0x25C)
+#define SE_SPI_WORD_LEN		(0x268)
+#define SE_SPI_TX_TRANS_LEN	(0x26C)
+#define SE_SPI_RX_TRANS_LEN	(0x270)
+#define SE_SPI_PRE_POST_CMD_DLY	(0x274)
+#define SE_SPI_DELAY_COUNTERS	(0x278)
+
+/* SE_SPI_CPHA register fields */
+#define CPHA			(BIT(0))
+
+/* SE_SPI_LOOPBACK register fields */
+#define LOOPBACK_ENABLE		(0x1)
+#define NORMAL_MODE		(0x0)
+#define LOOPBACK_MSK		(GENMASK(1, 0))
+
+/* SE_SPI_CPOL register fields */
+#define CPOL			(BIT(2))
+
+/* SE_SPI_DEMUX_OUTPUT_INV register fields */
+#define CS_DEMUX_OUTPUT_INV_MSK	(GENMASK(3, 0))
+
+/* SE_SPI_DEMUX_SEL register fields */
+#define CS_DEMUX_OUTPUT_SEL	(GENMASK(3, 0))
+
+/* SE_SPI_TX_TRANS_CFG register fields */
+#define CS_TOGGLE		(BIT(0))
+
+/* SE_SPI_WORD_LEN register fields */
+#define WORD_LEN_MSK		(GENMASK(9, 0))
+#define MIN_WORD_LEN		(4)
+
+/* SPI_TX/SPI_RX_TRANS_LEN fields */
+#define TRANS_LEN_MSK		(GENMASK(23, 0))
+
+/* SE_SPI_DELAY_COUNTERS */
+#define SPI_INTER_WORDS_DELAY_MSK	(GENMASK(9, 0))
+#define SPI_CS_CLK_DELAY_MSK		(GENMASK(19, 10))
+#define SPI_CS_CLK_DELAY_SHFT		(10)
+
+/* M_CMD OP codes for SPI */
+#define SPI_TX_ONLY		(1)
+#define SPI_RX_ONLY		(2)
+#define SPI_FULL_DUPLEX		(3)
+#define SPI_TX_RX		(7)
+#define SPI_CS_ASSERT		(8)
+#define SPI_CS_DEASSERT		(9)
+#define SPI_SCK_ONLY		(10)
+/* M_CMD params for SPI */
+#define SPI_PRE_CMD_DELAY	BIT(0)
+#define TIMESTAMP_BEFORE	BIT(1)
+#define FRAGMENTATION		BIT(2)
+#define TIMESTAMP_AFTER		BIT(3)
+#define POST_CMD_DELAY		BIT(4)
+
+#define SPI_CORE2X_VOTE		(7600)
+/* GSI CONFIG0 TRE Params */
+/* Flags bit fields */
+#define GSI_LOOPBACK_EN		(BIT(0))
+#define GSI_CS_TOGGLE		(BIT(3))
+#define GSI_CPHA		(BIT(4))
+#define GSI_CPOL		(BIT(5))
+
+#define MAX_TX_SG		(3)
+#define NUM_SPI_XFER		(8)
+
+struct gsi_desc_cb {
+	struct spi_master *spi;
+	struct spi_transfer *xfer;
+};
+
+struct spi_geni_gsi {
+	struct msm_gpi_tre config0_tre;
+	struct msm_gpi_tre go_tre;
+	struct msm_gpi_tre tx_dma_tre;
+	struct msm_gpi_tre rx_dma_tre;
+	struct scatterlist tx_sg[MAX_TX_SG];
+	struct scatterlist rx_sg;
+	dma_cookie_t tx_cookie;
+	dma_cookie_t rx_cookie;
+	struct msm_gpi_dma_async_tx_cb_param tx_cb_param;
+	struct msm_gpi_dma_async_tx_cb_param rx_cb_param;
+	struct dma_async_tx_descriptor *tx_desc;
+	struct dma_async_tx_descriptor *rx_desc;
+	struct gsi_desc_cb desc_cb;
+};
+
+struct spi_geni_master {
+	struct se_geni_rsc spi_rsc;
+	resource_size_t phys_addr;
+	resource_size_t size;
+	void __iomem *base;
+	int irq;
+	struct device *dev;
+	int rx_fifo_depth;
+	int tx_fifo_depth;
+	int tx_fifo_width;
+	int tx_wm;
+	bool setup;
+	u32 cur_speed_hz;
+	int cur_word_len;
+	unsigned int tx_rem_bytes;
+	unsigned int rx_rem_bytes;
+	struct spi_transfer *cur_xfer;
+	struct completion xfer_done;
+	struct device *wrapper_dev;
+	int oversampling;
+	struct spi_geni_gsi *gsi;
+	struct dma_chan *tx;
+	struct dma_chan *rx;
+	struct msm_gpi_ctrl tx_event;
+	struct msm_gpi_ctrl rx_event;
+	struct completion tx_cb;
+	struct completion rx_cb;
+	bool qn_err;
+	int cur_xfer_mode;
+	int num_tx_eot;
+	int num_rx_eot;
+	int num_xfers;
+	void *ipc;
+	bool shared_se;
+	bool dis_autosuspend;
+	bool cmd_done;
+};
+
+static struct spi_master *get_spi_master(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct spi_master *spi = platform_get_drvdata(pdev);
+
+	return spi;
+}
+
+static int get_spi_clk_cfg(u32 speed_hz, struct spi_geni_master *mas,
+			int *clk_idx, int *clk_div)
+{
+	unsigned long sclk_freq;
+	unsigned long res_freq;
+	struct se_geni_rsc *rsc = &mas->spi_rsc;
+	int ret = 0;
+
+	ret = geni_se_clk_freq_match(&mas->spi_rsc,
+				(speed_hz * mas->oversampling), clk_idx,
+				&sclk_freq, false);
+	if (ret) {
+		dev_err(mas->dev, "%s: Failed(%d) to find src clk for 0x%x\n",
+						__func__, ret, speed_hz);
+		return ret;
+	}
+
+	*clk_div = DIV_ROUND_UP(sclk_freq,  (mas->oversampling*speed_hz));
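+	/*
+	 * Illustration (hypothetical rates): a 5 MHz request with an
+	 * oversampling of 1 against a matched 19.2 MHz source clock gives
+	 * DIV_ROUND_UP(19200000, 5000000) = 4, i.e. an effective word
+	 * clock of 4.8 MHz, never above the requested rate.
+	 */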
+
+	if (!(*clk_div)) {
+		dev_err(mas->dev, "%s:Err:sclk:%lu oversampling:%d speed:%u\n",
+			__func__, sclk_freq, mas->oversampling, speed_hz);
+		return -EINVAL;
+	}
+
+	res_freq = (sclk_freq / (*clk_div));
+
+	dev_dbg(mas->dev, "%s: req %u resultant %u sclk %lu, idx %d, div %d\n",
+		__func__, speed_hz, res_freq, sclk_freq, *clk_idx, *clk_div);
+
+	ret = clk_set_rate(rsc->se_clk, sclk_freq);
+	if (ret)
+		dev_err(mas->dev, "%s: clk_set_rate failed %d\n",
+							__func__, ret);
+	return ret;
+}
+
+static void spi_setup_word_len(struct spi_geni_master *mas, u32 mode,
+						int bits_per_word)
+{
+	int pack_words = 1;
+	bool msb_first = !(mode & SPI_LSB_FIRST);
+	u32 word_len = geni_read_reg(mas->base, SE_SPI_WORD_LEN);
+	unsigned long cfg0, cfg1;
+
+	/*
+	 * Pack multiple SPI words into each FIFO word when the FIFO width
+	 * is an integer multiple of bits_per_word; otherwise fall back to
+	 * one SPI word per FIFO word.
+	 */
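+	/*
+	 * Example: a 16-bit wide FIFO packs two 8-bit SPI words per FIFO
+	 * word; with 12 bits-per-word (16 % 12 != 0) pack_words stays 1.
+	 */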
+	if (!(mas->tx_fifo_width % bits_per_word))
+		pack_words = mas->tx_fifo_width / bits_per_word;
+	word_len &= ~WORD_LEN_MSK;
+	word_len |= ((bits_per_word - MIN_WORD_LEN) & WORD_LEN_MSK);
+	se_config_packing(mas->base, bits_per_word, pack_words, msb_first);
+	geni_write_reg(word_len, mas->base, SE_SPI_WORD_LEN);
+	se_get_packing_config(bits_per_word, pack_words, msb_first,
+							&cfg0, &cfg1);
+	GENI_SE_DBG(mas->ipc, false, mas->dev,
+		"%s: cfg0 %lu cfg1 %lu bpw %d pack_words %d\n", __func__,
+		cfg0, cfg1, bits_per_word, pack_words);
+}
+
+static int setup_fifo_params(struct spi_device *spi_slv,
+					struct spi_master *spi)
+{
+	struct spi_geni_master *mas = spi_master_get_devdata(spi);
+	u16 mode = spi_slv->mode;
+	u32 loopback_cfg = geni_read_reg(mas->base, SE_SPI_LOOPBACK);
+	u32 cpol = geni_read_reg(mas->base, SE_SPI_CPOL);
+	u32 cpha = geni_read_reg(mas->base, SE_SPI_CPHA);
+	u32 demux_sel = 0;
+	u32 demux_output_inv = 0;
+	u32 clk_sel = 0;
+	u32 m_clk_cfg = 0;
+	int ret = 0;
+	int idx;
+	int div;
+	struct spi_geni_qcom_ctrl_data *delay_params = NULL;
+	u32 spi_delay_params = 0;
+
+	loopback_cfg &= ~LOOPBACK_MSK;
+	cpol &= ~CPOL;
+	cpha &= ~CPHA;
+
+	if (mode & SPI_LOOP)
+		loopback_cfg |= LOOPBACK_ENABLE;
+
+	if (mode & SPI_CPOL)
+		cpol |= CPOL;
+
+	if (mode & SPI_CPHA)
+		cpha |= CPHA;
+
+	if (spi_slv->mode & SPI_CS_HIGH)
+		demux_output_inv |= BIT(spi_slv->chip_select);
+
+	if (spi_slv->controller_data) {
+		u32 cs_clk_delay = 0;
+		u32 inter_words_delay = 0;
+
+		delay_params =
+		(struct spi_geni_qcom_ctrl_data *) spi_slv->controller_data;
+		cs_clk_delay =
+		(delay_params->spi_cs_clk_delay << SPI_CS_CLK_DELAY_SHFT)
+							& SPI_CS_CLK_DELAY_MSK;
+		inter_words_delay =
+			delay_params->spi_inter_words_delay &
+						SPI_INTER_WORDS_DELAY_MSK;
+		spi_delay_params =
+		(inter_words_delay | cs_clk_delay);
+	}
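+	/*
+	 * Bit packing illustration (hypothetical values): a spi_cs_clk_delay
+	 * of 2 lands in bits [19:10] as 0x800 and a spi_inter_words_delay of
+	 * 5 fills bits [9:0], giving spi_delay_params = 0x805.
+	 */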
+
+	demux_sel = spi_slv->chip_select;
+	mas->cur_speed_hz = spi_slv->max_speed_hz;
+	mas->cur_word_len = spi_slv->bits_per_word;
+
+	ret = get_spi_clk_cfg(mas->cur_speed_hz, mas, &idx, &div);
+	if (ret) {
+		dev_err(mas->dev, "Err setting clks ret(%d) for %d\n",
+							ret, mas->cur_speed_hz);
+		goto setup_fifo_params_exit;
+	}
+
+	clk_sel |= (idx & CLK_SEL_MSK);
+	m_clk_cfg |= ((div << CLK_DIV_SHFT) | SER_CLK_EN);
+	spi_setup_word_len(mas, spi_slv->mode, spi_slv->bits_per_word);
+	geni_write_reg(loopback_cfg, mas->base, SE_SPI_LOOPBACK);
+	geni_write_reg(demux_sel, mas->base, SE_SPI_DEMUX_SEL);
+	geni_write_reg(cpha, mas->base, SE_SPI_CPHA);
+	geni_write_reg(cpol, mas->base, SE_SPI_CPOL);
+	geni_write_reg(demux_output_inv, mas->base, SE_SPI_DEMUX_OUTPUT_INV);
+	geni_write_reg(clk_sel, mas->base, SE_GENI_CLK_SEL);
+	geni_write_reg(m_clk_cfg, mas->base, GENI_SER_M_CLK_CFG);
+	geni_write_reg(spi_delay_params, mas->base, SE_SPI_DELAY_COUNTERS);
+	GENI_SE_DBG(mas->ipc, false, mas->dev,
+		"%s:Loopback%d demux_sel0x%x demux_op_inv 0x%x clk_cfg 0x%x\n",
+		__func__, loopback_cfg, demux_sel, demux_output_inv, m_clk_cfg);
+	GENI_SE_DBG(mas->ipc, false, mas->dev,
+		"%s:clk_sel 0x%x cpol %d cpha %d delay 0x%x\n", __func__,
+					clk_sel, cpol, cpha, spi_delay_params);
+	/* Ensure message level attributes are written before returning */
+	mb();
+setup_fifo_params_exit:
+	return ret;
+}
+
+
+static int select_xfer_mode(struct spi_master *spi,
+				struct spi_message *spi_msg)
+{
+	struct spi_geni_master *mas = spi_master_get_devdata(spi);
+	int mode = SE_DMA;
+	int fifo_disable = (geni_read_reg(mas->base, GENI_IF_FIFO_DISABLE_RO) &
+							FIFO_IF_DISABLE);
+	bool dma_chan_valid =
+		!(IS_ERR_OR_NULL(mas->tx) || IS_ERR_OR_NULL(mas->rx));
+
+	/*
+	 * If the FIFO interface is disabled and there are no DMA channels,
+	 * this transfer can't be done at all. If the FIFO interface is
+	 * disabled, only GSI DMA is possible; otherwise default to SE_DMA
+	 * (FIFO vs. DMA is then chosen per transfer in setup_fifo_xfer).
+	 */
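+	/*
+	 * Resulting mode, illustratively:
+	 *   FIFO interface enabled              -> SE_DMA
+	 *   FIFO disabled, GSI channels present -> GSI_DMA
+	 *   FIFO disabled, no GSI channels      -> -EINVAL
+	 */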
+	if (fifo_disable && !dma_chan_valid)
+		mode = -EINVAL;
+	else if (!fifo_disable)
+		mode = SE_DMA;
+	else if (dma_chan_valid)
+		mode = GSI_DMA;
+	return mode;
+}
+
+static struct msm_gpi_tre *setup_config0_tre(struct spi_transfer *xfer,
+				struct spi_geni_master *mas, u16 mode,
+				u32 cs_clk_delay, u32 inter_words_delay)
+{
+	struct msm_gpi_tre *c0_tre = &mas->gsi[mas->num_xfers].config0_tre;
+	u8 flags = 0;
+	u8 word_len = 0;
+	u8 pack = 0;
+	int div = 0;
+	int idx = 0;
+	int ret = 0;
+
+	if (IS_ERR_OR_NULL(c0_tre))
+		return c0_tre;
+
+	if (mode & SPI_LOOP)
+		flags |= GSI_LOOPBACK_EN;
+
+	if (mode & SPI_CPOL)
+		flags |= GSI_CPOL;
+
+	if (mode & SPI_CPHA)
+		flags |= GSI_CPHA;
+
+	word_len = xfer->bits_per_word - MIN_WORD_LEN;
+	pack |= (GSI_TX_PACK_EN | GSI_RX_PACK_EN);
+	ret = get_spi_clk_cfg(mas->cur_speed_hz, mas, &idx, &div);
+	if (ret) {
+		dev_err(mas->dev, "%s:Err setting clks:%d\n", __func__, ret);
+		return ERR_PTR(ret);
+	}
+	c0_tre->dword[0] = MSM_GPI_SPI_CONFIG0_TRE_DWORD0(pack, flags,
+								word_len);
+	c0_tre->dword[1] = MSM_GPI_SPI_CONFIG0_TRE_DWORD1(0, cs_clk_delay,
+							inter_words_delay);
+	c0_tre->dword[2] = MSM_GPI_SPI_CONFIG0_TRE_DWORD2(idx, div);
+	c0_tre->dword[3] = MSM_GPI_SPI_CONFIG0_TRE_DWORD3(0, 0, 0, 0, 1);
+	GENI_SE_DBG(mas->ipc, false, mas->dev,
+		"%s: flags 0x%x word %d pack %d idx %d div %d\n",
+		__func__, flags, word_len, pack, idx, div);
+	GENI_SE_DBG(mas->ipc, false, mas->dev,
+		"%s: cs_clk_delay %d inter_words_delay %d\n", __func__,
+				 cs_clk_delay, inter_words_delay);
+	return c0_tre;
+}
+
+static struct msm_gpi_tre *setup_go_tre(int cmd, int cs, int rx_len, int flags,
+				struct spi_geni_master *mas)
+{
+	struct msm_gpi_tre *go_tre = &mas->gsi[mas->num_xfers].go_tre;
+	int chain;
+	int eot;
+	int eob;
+	int link_rx = 0;
+
+	if (IS_ERR_OR_NULL(go_tre))
+		return go_tre;
+
+	go_tre->dword[0] = MSM_GPI_SPI_GO_TRE_DWORD0(flags, cs, cmd);
+	go_tre->dword[1] = MSM_GPI_SPI_GO_TRE_DWORD1;
+	go_tre->dword[2] = MSM_GPI_SPI_GO_TRE_DWORD2(rx_len);
+	if (cmd == SPI_RX_ONLY) {
+		eot = 0;
+		chain = 0;
+		eob = 1;
+	} else {
+		eot = 0;
+		chain = 1;
+		eob = 0;
+	}
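+	/*
+	 * An RX-only GO TRE terminates the TX chain with an end-of-block;
+	 * TX-only and full-duplex GO TREs instead chain into the DMA TRE
+	 * that follows them.
+	 */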
+	if (cmd & SPI_RX_ONLY)
+		link_rx = 1;
+	go_tre->dword[3] = MSM_GPI_SPI_GO_TRE_DWORD3(link_rx, 0, eot, eob,
+								chain);
+	GENI_SE_DBG(mas->ipc, false, mas->dev,
+	"%s: rx len %d flags 0x%x cs %d cmd %d eot %d eob %d chain %d\n",
+		__func__, rx_len, flags, cs, cmd, eot, eob, chain);
+	return go_tre;
+}
+
+static struct msm_gpi_tre *setup_dma_tre(struct msm_gpi_tre *tre,
+					dma_addr_t buf, u32 len,
+					struct spi_geni_master *mas,
+					bool is_tx)
+{
+	if (IS_ERR_OR_NULL(tre))
+		return tre;
+
+	tre->dword[0] = MSM_GPI_DMA_W_BUFFER_TRE_DWORD0(buf);
+	tre->dword[1] = MSM_GPI_DMA_W_BUFFER_TRE_DWORD1(buf);
+	tre->dword[2] = MSM_GPI_DMA_W_BUFFER_TRE_DWORD2(len);
+	tre->dword[3] = MSM_GPI_DMA_W_BUFFER_TRE_DWORD3(0, 0, is_tx, 0, 0);
+	return tre;
+}
+
+static void spi_gsi_ch_cb(struct dma_chan *ch, struct msm_gpi_cb const *cb,
+				void *ptr)
+{
+	struct spi_master *spi = ptr;
+	struct spi_geni_master *mas = spi_master_get_devdata(spi);
+
+	switch (cb->cb_event) {
+	case MSM_GPI_QUP_NOTIFY:
+	case MSM_GPI_QUP_MAX_EVENT:
+		GENI_SE_DBG(mas->ipc, false, mas->dev,
+				"%s:cb_ev%d status%llu ts%llu count%llu\n",
+				__func__, cb->cb_event, cb->status,
+				cb->timestamp, cb->count);
+		break;
+	case MSM_GPI_QUP_ERROR:
+	case MSM_GPI_QUP_CH_ERROR:
+	case MSM_GPI_QUP_FW_ERROR:
+	case MSM_GPI_QUP_PENDING_EVENT:
+	case MSM_GPI_QUP_EOT_DESC_MISMATCH:
+	case MSM_GPI_QUP_SW_ERROR:
+		GENI_SE_ERR(mas->ipc, true, mas->dev,
+				"%s: cb_ev %d status %llu ts %llu count %llu\n",
+				__func__, cb->cb_event, cb->status,
+				cb->timestamp, cb->count);
+		GENI_SE_ERR(mas->ipc, true, mas->dev,
+				"err.routine %u, err.type %u, err.code %u\n",
+				cb->error_log.routine,
+				cb->error_log.type,
+				cb->error_log.error_code);
+		mas->qn_err = true;
+		complete_all(&mas->tx_cb);
+		complete_all(&mas->rx_cb);
+
+		break;
+	}
+}
+
+static void spi_gsi_rx_callback(void *cb)
+{
+	struct msm_gpi_dma_async_tx_cb_param *cb_param =
+			(struct msm_gpi_dma_async_tx_cb_param *)cb;
+	struct gsi_desc_cb *desc_cb = (struct gsi_desc_cb *)cb_param->userdata;
+	struct spi_master *spi = desc_cb->spi;
+	struct spi_transfer *xfer = desc_cb->xfer;
+	struct spi_geni_master *mas = spi_master_get_devdata(spi);
+
+	if (xfer->rx_buf) {
+		if (cb_param->status == MSM_GPI_TCE_UNEXP_ERR) {
+			GENI_SE_ERR(mas->ipc, true, mas->dev,
+			"%s: Unexpected GSI CB error\n", __func__);
+			return;
+		}
+		if (cb_param->length == xfer->len) {
+			GENI_SE_DBG(mas->ipc, false, mas->dev,
+			"%s\n", __func__);
+			complete(&mas->rx_cb);
+		} else {
+			GENI_SE_ERR(mas->ipc, true, mas->dev,
+			"%s: Length mismatch. Expected %d Callback %d\n",
+			__func__, xfer->len, cb_param->length);
+		}
+	}
+}
+
+static void spi_gsi_tx_callback(void *cb)
+{
+	struct msm_gpi_dma_async_tx_cb_param *cb_param = cb;
+	struct gsi_desc_cb *desc_cb = (struct gsi_desc_cb *)cb_param->userdata;
+	struct spi_master *spi = desc_cb->spi;
+	struct spi_transfer *xfer = desc_cb->xfer;
+	struct spi_geni_master *mas = spi_master_get_devdata(spi);
+
+	if (xfer->tx_buf) {
+		if (cb_param->status == MSM_GPI_TCE_UNEXP_ERR) {
+			GENI_SE_ERR(mas->ipc, true, mas->dev,
+			"%s: Unexpected GSI CB error\n", __func__);
+			return;
+		}
+		if (cb_param->length == xfer->len) {
+			GENI_SE_DBG(mas->ipc, false, mas->dev,
+			"%s\n", __func__);
+			complete(&mas->tx_cb);
+		} else {
+			GENI_SE_ERR(mas->ipc, true, mas->dev,
+			"%s: Length mismatch. Expected %d Callback %d\n",
+			__func__, xfer->len, cb_param->length);
+		}
+	}
+}
+
+static int setup_gsi_xfer(struct spi_transfer *xfer,
+				struct spi_geni_master *mas,
+				struct spi_device *spi_slv,
+				struct spi_master *spi)
+{
+	int ret = 0;
+	struct msm_gpi_tre *c0_tre = NULL;
+	struct msm_gpi_tre *go_tre = NULL;
+	struct msm_gpi_tre *tx_tre = NULL;
+	struct msm_gpi_tre *rx_tre = NULL;
+	struct scatterlist *xfer_tx_sg = mas->gsi[mas->num_xfers].tx_sg;
+	struct scatterlist *xfer_rx_sg = &mas->gsi[mas->num_xfers].rx_sg;
+	int rx_nent = 0;
+	int tx_nent = 0;
+	u8 cmd = 0;
+	u8 cs = 0;
+	u32 rx_len = 0;
+	int go_flags = 0;
+	unsigned long flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
+	struct spi_geni_qcom_ctrl_data *delay_params = NULL;
+	u32 cs_clk_delay = 0;
+	u32 inter_words_delay = 0;
+
+	if (spi_slv->controller_data) {
+		delay_params =
+		(struct spi_geni_qcom_ctrl_data *) spi_slv->controller_data;
+
+		cs_clk_delay =
+			delay_params->spi_cs_clk_delay;
+		inter_words_delay =
+			delay_params->spi_inter_words_delay;
+	}
+
+	if ((xfer->bits_per_word != mas->cur_word_len) ||
+		(xfer->speed_hz != mas->cur_speed_hz)) {
+		mas->cur_word_len = xfer->bits_per_word;
+		mas->cur_speed_hz = xfer->speed_hz;
+		tx_nent++;
+		c0_tre = setup_config0_tre(xfer, mas, spi_slv->mode,
+					cs_clk_delay, inter_words_delay);
+		if (IS_ERR_OR_NULL(c0_tre)) {
+			dev_err(mas->dev, "%s:Err setting c0tre:%d\n",
+							__func__, ret);
+			return PTR_ERR(c0_tre);
+		}
+	}
+
+	if (!(mas->cur_word_len % MIN_WORD_LEN)) {
+		rx_len = ((xfer->len << 3) / mas->cur_word_len);
+	} else {
+		int bytes_per_word = (mas->cur_word_len / BITS_PER_BYTE) + 1;
+
+		rx_len = (xfer->len / bytes_per_word);
+	}
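+	/*
+	 * e.g. at 16 bits-per-word a 10-byte buffer counts as 5 words,
+	 * while at 10 bits-per-word each word occupies 10/8 + 1 = 2 bytes,
+	 * so the same buffer again yields rx_len = 5.
+	 */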
+
+	if (xfer->tx_buf && xfer->rx_buf) {
+		cmd = SPI_FULL_DUPLEX;
+		tx_nent += 2;
+		rx_nent++;
+	} else if (xfer->tx_buf) {
+		cmd = SPI_TX_ONLY;
+		tx_nent += 2;
+		rx_len = 0;
+	} else if (xfer->rx_buf) {
+		cmd = SPI_RX_ONLY;
+		tx_nent++;
+		rx_nent++;
+	}
+
+	cs |= spi_slv->chip_select;
+	if (!xfer->cs_change) {
+		if (!list_is_last(&xfer->transfer_list,
+					&spi->cur_msg->transfers))
+			go_flags |= FRAGMENTATION;
+	}
+	go_tre = setup_go_tre(cmd, cs, rx_len, go_flags, mas);
+
+	sg_init_table(xfer_tx_sg, tx_nent);
+	if (rx_nent)
+		sg_init_table(xfer_rx_sg, rx_nent);
+
+	if (c0_tre)
+		sg_set_buf(xfer_tx_sg++, c0_tre, sizeof(*c0_tre));
+
+	sg_set_buf(xfer_tx_sg++, go_tre, sizeof(*go_tre));
+	mas->gsi[mas->num_xfers].desc_cb.spi = spi;
+	mas->gsi[mas->num_xfers].desc_cb.xfer = xfer;
+	if (cmd & SPI_RX_ONLY) {
+		rx_tre = &mas->gsi[mas->num_xfers].rx_dma_tre;
+		rx_tre = setup_dma_tre(rx_tre, xfer->rx_dma, xfer->len, mas, 0);
+		if (IS_ERR_OR_NULL(rx_tre)) {
+			dev_err(mas->dev, "Err setting up rx tre\n");
+			return PTR_ERR(rx_tre);
+		}
+		sg_set_buf(xfer_rx_sg, rx_tre, sizeof(*rx_tre));
+		mas->gsi[mas->num_xfers].rx_desc =
+			dmaengine_prep_slave_sg(mas->rx,
+				&mas->gsi[mas->num_xfers].rx_sg, rx_nent,
+						DMA_DEV_TO_MEM, flags);
+		if (IS_ERR_OR_NULL(mas->gsi[mas->num_xfers].rx_desc)) {
+			dev_err(mas->dev, "Err setting up rx desc\n");
+			return -EIO;
+		}
+		mas->gsi[mas->num_xfers].rx_desc->callback =
+					spi_gsi_rx_callback;
+		mas->gsi[mas->num_xfers].rx_desc->callback_param =
+					&mas->gsi[mas->num_xfers].rx_cb_param;
+		mas->gsi[mas->num_xfers].rx_cb_param.userdata =
+					&mas->gsi[mas->num_xfers].desc_cb;
+		mas->num_rx_eot++;
+	}
+
+	if (cmd & SPI_TX_ONLY) {
+		tx_tre = &mas->gsi[mas->num_xfers].tx_dma_tre;
+		tx_tre = setup_dma_tre(tx_tre, xfer->tx_dma, xfer->len, mas, 1);
+		if (IS_ERR_OR_NULL(tx_tre)) {
+			dev_err(mas->dev, "Err setting up tx tre\n");
+			return PTR_ERR(tx_tre);
+		}
+		sg_set_buf(xfer_tx_sg++, tx_tre, sizeof(*tx_tre));
+		mas->num_tx_eot++;
+	}
+	mas->gsi[mas->num_xfers].tx_desc = dmaengine_prep_slave_sg(mas->tx,
+					mas->gsi[mas->num_xfers].tx_sg, tx_nent,
+					DMA_MEM_TO_DEV, flags);
+	if (IS_ERR_OR_NULL(mas->gsi[mas->num_xfers].tx_desc)) {
+		dev_err(mas->dev, "Err setting up tx desc\n");
+		return -EIO;
+	}
+	mas->gsi[mas->num_xfers].tx_desc->callback = spi_gsi_tx_callback;
+	mas->gsi[mas->num_xfers].tx_desc->callback_param =
+					&mas->gsi[mas->num_xfers].tx_cb_param;
+	mas->gsi[mas->num_xfers].tx_cb_param.userdata =
+					&mas->gsi[mas->num_xfers].desc_cb;
+	mas->gsi[mas->num_xfers].tx_cookie =
+			dmaengine_submit(mas->gsi[mas->num_xfers].tx_desc);
+	if (cmd & SPI_RX_ONLY)
+		mas->gsi[mas->num_xfers].rx_cookie =
+			dmaengine_submit(mas->gsi[mas->num_xfers].rx_desc);
+	dma_async_issue_pending(mas->tx);
+	if (cmd & SPI_RX_ONLY)
+		dma_async_issue_pending(mas->rx);
+	mas->num_xfers++;
+	return ret;
+}
+
+static int spi_geni_map_buf(struct spi_geni_master *mas,
+				struct spi_message *msg)
+{
+	struct spi_transfer *xfer;
+	int ret = 0;
+
+	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+		if (xfer->rx_buf) {
+			ret = geni_se_iommu_map_buf(mas->wrapper_dev,
+						&xfer->rx_dma, xfer->rx_buf,
+						xfer->len, DMA_FROM_DEVICE);
+			if (ret) {
+				GENI_SE_ERR(mas->ipc, true, mas->dev,
+				"%s: Mapping Rx buffer %d\n", __func__, ret);
+				return ret;
+			}
+		}
+
+		if (xfer->tx_buf) {
+			ret = geni_se_iommu_map_buf(mas->wrapper_dev,
+						&xfer->tx_dma,
+						(void *)xfer->tx_buf,
+						xfer->len, DMA_TO_DEVICE);
+			if (ret) {
+				GENI_SE_ERR(mas->ipc, true, mas->dev,
+				"%s: Mapping Tx buffer %d\n", __func__, ret);
+				return ret;
+			}
+		}
+	}
+	return 0;
+}
+
+static void spi_geni_unmap_buf(struct spi_geni_master *mas,
+				struct spi_message *msg)
+{
+	struct spi_transfer *xfer;
+
+	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+		if (xfer->rx_buf)
+			geni_se_iommu_unmap_buf(mas->wrapper_dev, &xfer->rx_dma,
+						xfer->len, DMA_FROM_DEVICE);
+		if (xfer->tx_buf)
+			geni_se_iommu_unmap_buf(mas->wrapper_dev, &xfer->tx_dma,
+						xfer->len, DMA_TO_DEVICE);
+	}
+}
+
+static int spi_geni_prepare_message(struct spi_master *spi,
+					struct spi_message *spi_msg)
+{
+	int ret = 0;
+	struct spi_geni_master *mas = spi_master_get_devdata(spi);
+
+	mas->cur_xfer_mode = select_xfer_mode(spi, spi_msg);
+
+	if (mas->cur_xfer_mode < 0) {
+		dev_err(mas->dev, "%s: Couldn't select mode %d\n", __func__,
+							mas->cur_xfer_mode);
+		ret = -EINVAL;
+	} else if (mas->cur_xfer_mode == GSI_DMA) {
+		memset(mas->gsi, 0,
+				(sizeof(struct spi_geni_gsi) * NUM_SPI_XFER));
+		geni_se_select_mode(mas->base, GSI_DMA);
+		ret = spi_geni_map_buf(mas, spi_msg);
+	} else {
+		geni_se_select_mode(mas->base, mas->cur_xfer_mode);
+		ret = setup_fifo_params(spi_msg->spi, spi);
+	}
+
+	return ret;
+}
+
+static int spi_geni_unprepare_message(struct spi_master *spi_mas,
+					struct spi_message *spi_msg)
+{
+	struct spi_geni_master *mas = spi_master_get_devdata(spi_mas);
+
+	mas->cur_speed_hz = 0;
+	mas->cur_word_len = 0;
+	if (mas->cur_xfer_mode == GSI_DMA)
+		spi_geni_unmap_buf(mas, spi_msg);
+	return 0;
+}
+
+static int spi_geni_prepare_transfer_hardware(struct spi_master *spi)
+{
+	struct spi_geni_master *mas = spi_master_get_devdata(spi);
+	int ret = 0, count = 0;
+	u32 max_speed = spi->cur_msg->spi->max_speed_hz;
+	struct se_geni_rsc *rsc = &mas->spi_rsc;
+
+	/* Adjust the IB based on the max speed of the slave. */
+	rsc->ib = max_speed * DEFAULT_BUS_WIDTH;
+	if (mas->shared_se) {
+		struct se_geni_rsc *rsc;
+		int ret = 0;
+
+		rsc = &mas->spi_rsc;
+		ret = pinctrl_select_state(rsc->geni_pinctrl,
+						rsc->geni_gpio_active);
+		if (ret)
+			GENI_SE_ERR(mas->ipc, false, NULL,
+			"%s: Error %d pinctrl_select_state\n", __func__, ret);
+	}
+
+	ret = pm_runtime_get_sync(mas->dev);
+	if (ret < 0) {
+		dev_err(mas->dev, "%s:Error enabling SE resources %d\n",
+							__func__, ret);
+		pm_runtime_put_noidle(mas->dev);
+		goto exit_prepare_transfer_hardware;
+	} else {
+		ret = 0;
+	}
+	if (mas->dis_autosuspend) {
+		count = atomic_read(&mas->dev->power.usage_count);
+		if (count <= 0)
+			GENI_SE_ERR(mas->ipc, false, NULL,
+				"resume usage count mismatch:%d", count);
+	}
+	if (unlikely(!mas->setup)) {
+		int proto = get_se_proto(mas->base);
+		unsigned int major;
+		unsigned int minor;
+		unsigned int step;
+		int hw_ver;
+
+		if (unlikely(proto != SPI)) {
+			dev_err(mas->dev, "Invalid proto %d\n", proto);
+			return -ENXIO;
+		}
+		geni_se_init(mas->base, 0x0, (mas->tx_fifo_depth - 2));
+		mas->tx_fifo_depth = get_tx_fifo_depth(mas->base);
+		mas->rx_fifo_depth = get_rx_fifo_depth(mas->base);
+		mas->tx_fifo_width = get_tx_fifo_width(mas->base);
+		mas->oversampling = 1;
+		/* Transmit an entire FIFO worth of data per IRQ */
+		mas->tx_wm = 1;
+
+		mas->tx = dma_request_slave_channel(mas->dev, "tx");
+		if (IS_ERR_OR_NULL(mas->tx)) {
+			dev_info(mas->dev, "Failed to get tx DMA ch %ld\n",
+							PTR_ERR(mas->tx));
+		} else {
+			mas->rx = dma_request_slave_channel(mas->dev, "rx");
+			if (IS_ERR_OR_NULL(mas->rx)) {
+				dev_info(mas->dev, "Failed to get rx DMA ch %ld\n",
+							PTR_ERR(mas->rx));
+				dma_release_channel(mas->tx);
+			}
+			mas->gsi = devm_kzalloc(mas->dev,
+				(sizeof(struct spi_geni_gsi) * NUM_SPI_XFER),
+				GFP_KERNEL);
+			if (IS_ERR_OR_NULL(mas->gsi)) {
+				dev_err(mas->dev, "Failed to get GSI mem\n");
+				dma_release_channel(mas->tx);
+				dma_release_channel(mas->rx);
+				mas->tx = NULL;
+				mas->rx = NULL;
+				goto setup_ipc;
+			}
+			mas->tx_event.init.callback = spi_gsi_ch_cb;
+			mas->tx_event.init.cb_param = spi;
+			mas->tx_event.cmd = MSM_GPI_INIT;
+			mas->tx->private = &mas->tx_event;
+			mas->rx_event.init.callback = spi_gsi_ch_cb;
+			mas->rx_event.init.cb_param = spi;
+			mas->rx_event.cmd = MSM_GPI_INIT;
+			mas->rx->private = &mas->rx_event;
+			if (dmaengine_slave_config(mas->tx, NULL)) {
+				dev_err(mas->dev, "Failed to Config Tx\n");
+				dma_release_channel(mas->tx);
+				dma_release_channel(mas->rx);
+				mas->tx = NULL;
+				mas->rx = NULL;
+				goto setup_ipc;
+			}
+			if (dmaengine_slave_config(mas->rx, NULL)) {
+				dev_err(mas->dev, "Failed to Config Rx\n");
+				dma_release_channel(mas->tx);
+				dma_release_channel(mas->rx);
+				mas->tx = NULL;
+				mas->rx = NULL;
+				goto setup_ipc;
+			}
+
+		}
+setup_ipc:
+		mas->ipc = ipc_log_context_create(4, dev_name(mas->dev), 0);
+		dev_info(mas->dev, "tx_fifo %d rx_fifo %d tx_width %d\n",
+			mas->tx_fifo_depth, mas->rx_fifo_depth,
+			mas->tx_fifo_width);
+		mas->setup = true;
+		hw_ver = geni_se_qupv3_hw_version(mas->wrapper_dev, &major,
+							&minor, &step);
+		if (hw_ver)
+			dev_err(mas->dev, "%s:Err getting HW version %d\n",
+							__func__, hw_ver);
+		else {
+			if ((major == 1) && (minor == 0))
+				mas->oversampling = 2;
+			GENI_SE_DBG(mas->ipc, false, mas->dev,
+				"%s:Major:%d Minor:%d step:%dos%d\n",
+			__func__, major, minor, step, mas->oversampling);
+		}
+		mas->shared_se =
+			(geni_read_reg(mas->base, GENI_IF_FIFO_DISABLE_RO) &
+							FIFO_IF_DISABLE);
+		if (mas->dis_autosuspend)
+			GENI_SE_DBG(mas->ipc, false, mas->dev,
+					"Auto Suspend is disabled\n");
+	}
+exit_prepare_transfer_hardware:
+	return ret;
+}
+
+static int spi_geni_unprepare_transfer_hardware(struct spi_master *spi)
+{
+	struct spi_geni_master *mas = spi_master_get_devdata(spi);
+	int count = 0;
+
+	if (mas->shared_se) {
+		struct se_geni_rsc *rsc;
+		int ret = 0;
+
+		rsc = &mas->spi_rsc;
+		ret = pinctrl_select_state(rsc->geni_pinctrl,
+						rsc->geni_gpio_sleep);
+		if (ret)
+			GENI_SE_ERR(mas->ipc, false, NULL,
+			"%s: Error %d pinctrl_select_state\n", __func__, ret);
+	}
+
+	if (mas->dis_autosuspend) {
+		pm_runtime_put_sync(mas->dev);
+		count = atomic_read(&mas->dev->power.usage_count);
+		if (count < 0)
+			GENI_SE_ERR(mas->ipc, false, NULL,
+				"suspend usage count mismatch:%d", count);
+	} else {
+		pm_runtime_mark_last_busy(mas->dev);
+		pm_runtime_put_autosuspend(mas->dev);
+	}
+	return 0;
+}
+
+static void setup_fifo_xfer(struct spi_transfer *xfer,
+				struct spi_geni_master *mas, u16 mode,
+				struct spi_master *spi)
+{
+	u32 m_cmd = 0;
+	u32 m_param = 0;
+	u32 spi_tx_cfg = geni_read_reg(mas->base, SE_SPI_TRANS_CFG);
+	u32 trans_len = 0, fifo_size = 0;
+
+	if (xfer->bits_per_word != mas->cur_word_len) {
+		spi_setup_word_len(mas, mode, xfer->bits_per_word);
+		mas->cur_word_len = xfer->bits_per_word;
+	}
+
+	/* Speed and bits per word can be overridden per transfer */
+	if (xfer->speed_hz != mas->cur_speed_hz) {
+		int ret = 0;
+		u32 clk_sel = 0;
+		u32 m_clk_cfg = 0;
+		int idx = 0;
+		int div = 0;
+
+		ret = get_spi_clk_cfg(xfer->speed_hz, mas, &idx, &div);
+		if (ret) {
+			dev_err(mas->dev, "%s:Err setting clks:%d\n",
+								__func__, ret);
+			return;
+		}
+		mas->cur_speed_hz = xfer->speed_hz;
+		clk_sel |= (idx & CLK_SEL_MSK);
+		m_clk_cfg |= ((div << CLK_DIV_SHFT) | SER_CLK_EN);
+		geni_write_reg(clk_sel, mas->base, SE_GENI_CLK_SEL);
+		geni_write_reg(m_clk_cfg, mas->base, GENI_SER_M_CLK_CFG);
+	}
+
+	mas->tx_rem_bytes = 0;
+	mas->rx_rem_bytes = 0;
+	if (xfer->tx_buf && xfer->rx_buf)
+		m_cmd = SPI_FULL_DUPLEX;
+	else if (xfer->tx_buf)
+		m_cmd = SPI_TX_ONLY;
+	else if (xfer->rx_buf)
+		m_cmd = SPI_RX_ONLY;
+
+	spi_tx_cfg &= ~CS_TOGGLE;
+	if (!(mas->cur_word_len % MIN_WORD_LEN)) {
+		trans_len =
+			((xfer->len << 3) / mas->cur_word_len) & TRANS_LEN_MSK;
+	} else {
+		int bytes_per_word = (mas->cur_word_len / BITS_PER_BYTE) + 1;
+
+		trans_len = (xfer->len / bytes_per_word) & TRANS_LEN_MSK;
+	}
+
+	if (!xfer->cs_change) {
+		if (!list_is_last(&xfer->transfer_list,
+					&spi->cur_msg->transfers))
+			m_param |= FRAGMENTATION;
+	}
+
+	mas->cur_xfer = xfer;
+	if (m_cmd & SPI_TX_ONLY) {
+		mas->tx_rem_bytes = xfer->len;
+		geni_write_reg(trans_len, mas->base, SE_SPI_TX_TRANS_LEN);
+	}
+
+	if (m_cmd & SPI_RX_ONLY) {
+		geni_write_reg(trans_len, mas->base, SE_SPI_RX_TRANS_LEN);
+		mas->rx_rem_bytes = xfer->len;
+	}
+
+	fifo_size =
+		(mas->tx_fifo_depth * mas->tx_fifo_width / mas->cur_word_len);
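+	/*
+	 * Illustration (hypothetical geometry): a 16-deep, 32-bit wide TX
+	 * FIFO at 8 bits-per-word holds 64 words, so longer transfers are
+	 * moved to SE_DMA below.
+	 */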
+	if (trans_len > fifo_size) {
+		if (mas->cur_xfer_mode != SE_DMA) {
+			mas->cur_xfer_mode = SE_DMA;
+			geni_se_select_mode(mas->base, mas->cur_xfer_mode);
+		}
+	} else {
+		if (mas->cur_xfer_mode != FIFO_MODE) {
+			mas->cur_xfer_mode = FIFO_MODE;
+			geni_se_select_mode(mas->base, mas->cur_xfer_mode);
+		}
+	}
+
+	geni_write_reg(spi_tx_cfg, mas->base, SE_SPI_TRANS_CFG);
+	geni_setup_m_cmd(mas->base, m_cmd, m_param);
+	GENI_SE_DBG(mas->ipc, false, mas->dev,
+	"%s: trans_len %d xferlen%d tx_cfg 0x%x cmd 0x%x cs%d mode%d\n",
+		__func__, trans_len, xfer->len, spi_tx_cfg, m_cmd,
+			xfer->cs_change, mas->cur_xfer_mode);
+	if ((m_cmd & SPI_RX_ONLY) && (mas->cur_xfer_mode == SE_DMA)) {
+		int ret = 0;
+
+		ret =  geni_se_rx_dma_prep(mas->wrapper_dev, mas->base,
+				xfer->rx_buf, xfer->len, &xfer->rx_dma);
+		if (ret)
+			GENI_SE_ERR(mas->ipc, true, mas->dev,
+				"Failed to setup Rx dma %d\n", ret);
+	}
+	if (m_cmd & SPI_TX_ONLY) {
+		if (mas->cur_xfer_mode == FIFO_MODE) {
+			geni_write_reg(mas->tx_wm, mas->base,
+					SE_GENI_TX_WATERMARK_REG);
+		} else if (mas->cur_xfer_mode == SE_DMA) {
+			int ret = 0;
+
+			ret =  geni_se_tx_dma_prep(mas->wrapper_dev, mas->base,
+					(void *)xfer->tx_buf, xfer->len,
+							&xfer->tx_dma);
+			if (ret)
+				GENI_SE_ERR(mas->ipc, true, mas->dev,
+					"Failed to setup tx dma %d\n", ret);
+		}
+	}
+
+	/* Ensure all writes are done before the WM interrupt */
+	mb();
+}
+
+static void handle_fifo_timeout(struct spi_geni_master *mas,
+					struct spi_transfer *xfer)
+{
+	unsigned long timeout;
+
+	geni_se_dump_dbg_regs(&mas->spi_rsc, mas->base, mas->ipc);
+	reinit_completion(&mas->xfer_done);
+	geni_cancel_m_cmd(mas->base);
+	if (mas->cur_xfer_mode == FIFO_MODE)
+		geni_write_reg(0, mas->base, SE_GENI_TX_WATERMARK_REG);
+	/* Ensure cmd cancel is written */
+	mb();
+	timeout = wait_for_completion_timeout(&mas->xfer_done, HZ);
+	if (!timeout) {
+		reinit_completion(&mas->xfer_done);
+		geni_abort_m_cmd(mas->base);
+		/* Ensure cmd abort is written */
+		mb();
+		timeout = wait_for_completion_timeout(&mas->xfer_done,
+								HZ);
+		if (!timeout)
+			dev_err(mas->dev,
+				"Failed to cancel/abort m_cmd\n");
+	}
+	if (mas->cur_xfer_mode == SE_DMA) {
+		if (xfer->tx_buf)
+			geni_se_tx_dma_unprep(mas->wrapper_dev,
+					xfer->tx_dma, xfer->len);
+		if (xfer->rx_buf)
+			geni_se_rx_dma_unprep(mas->wrapper_dev,
+					xfer->rx_dma, xfer->len);
+	}
+}
+
+static int spi_geni_transfer_one(struct spi_master *spi,
+				struct spi_device *slv,
+				struct spi_transfer *xfer)
+{
+	int ret = 0;
+	struct spi_geni_master *mas = spi_master_get_devdata(spi);
+	unsigned long timeout;
+
+	if ((xfer->tx_buf == NULL) && (xfer->rx_buf == NULL)) {
+		dev_err(mas->dev, "Invalid xfer both tx rx are NULL\n");
+		return -EINVAL;
+	}
+
+	if (mas->cur_xfer_mode != GSI_DMA) {
+		reinit_completion(&mas->xfer_done);
+		setup_fifo_xfer(xfer, mas, slv->mode, spi);
+		timeout = wait_for_completion_timeout(&mas->xfer_done,
+					msecs_to_jiffies(SPI_XFER_TIMEOUT_MS));
+		if (!timeout) {
+			GENI_SE_ERR(mas->ipc, true, mas->dev,
+				"Xfer[len %d tx %pK rx %pK n %d] timed out.\n",
+						xfer->len, xfer->tx_buf,
+						xfer->rx_buf,
+						xfer->bits_per_word);
+			mas->cur_xfer = NULL;
+			ret = -ETIMEDOUT;
+			goto err_fifo_geni_transfer_one;
+		}
+
+		if (mas->cur_xfer_mode == SE_DMA) {
+			if (xfer->tx_buf)
+				geni_se_tx_dma_unprep(mas->wrapper_dev,
+					xfer->tx_dma, xfer->len);
+			if (xfer->rx_buf)
+				geni_se_rx_dma_unprep(mas->wrapper_dev,
+					xfer->rx_dma, xfer->len);
+		}
+	} else {
+		mas->num_tx_eot = 0;
+		mas->num_rx_eot = 0;
+		mas->num_xfers = 0;
+		reinit_completion(&mas->tx_cb);
+		reinit_completion(&mas->rx_cb);
+
+		setup_gsi_xfer(xfer, mas, slv, spi);
+		if ((mas->num_xfers >= NUM_SPI_XFER) ||
+			(list_is_last(&xfer->transfer_list,
+					&spi->cur_msg->transfers))) {
+			int i;
+
+			for (i = 0 ; i < mas->num_tx_eot; i++) {
+				timeout =
+				wait_for_completion_timeout(
+					&mas->tx_cb,
+					msecs_to_jiffies(SPI_XFER_TIMEOUT_MS));
+				if (!timeout) {
+					GENI_SE_ERR(mas->ipc, true, mas->dev,
+					"Tx[%d] timeout%lu\n", i, timeout);
+					ret = -ETIMEDOUT;
+					goto err_gsi_geni_transfer_one;
+				}
+			}
+			for (i = 0 ; i < mas->num_rx_eot; i++) {
+				timeout =
+				wait_for_completion_timeout(
+					&mas->rx_cb,
+					msecs_to_jiffies(SPI_XFER_TIMEOUT_MS));
+				if (!timeout) {
+					GENI_SE_ERR(mas->ipc, true, mas->dev,
+					 "Rx[%d] timeout%lu\n", i, timeout);
+					ret = -ETIMEDOUT;
+					goto err_gsi_geni_transfer_one;
+				}
+			}
+			if (mas->qn_err) {
+				ret = -EIO;
+				mas->qn_err = false;
+				goto err_gsi_geni_transfer_one;
+			}
+		}
+	}
+	return ret;
+err_gsi_geni_transfer_one:
+	geni_se_dump_dbg_regs(&mas->spi_rsc, mas->base, mas->ipc);
+	dmaengine_terminate_all(mas->tx);
+	return ret;
+err_fifo_geni_transfer_one:
+	handle_fifo_timeout(mas, xfer);
+	return ret;
+}
+
+static void geni_spi_handle_tx(struct spi_geni_master *mas)
+{
+	int i = 0;
+	int tx_fifo_width = (mas->tx_fifo_width >> 3);
+	int max_bytes = 0;
+	const u8 *tx_buf = NULL;
+
+	if (!mas->cur_xfer)
+		return;
+
+	/*
+	 * For non-byte aligned bits-per-word values:
+	 * Assumption is that each SPI word will be accommodated in
+	 * ceil(bits_per_word / bits_per_byte) bytes
+	 * and the next SPI word starts at the next byte.
+	 * In such cases, we can fit 1 SPI word per FIFO word so adjust the
+	 * max byte that can be sent per IRQ accordingly.
+	 */
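+	/*
+	 * Illustration (hypothetical geometry): with a 16-deep, 32-bit wide
+	 * FIFO and tx_wm = 1, byte-multiple words allow 15 * 4 = 60 bytes
+	 * per IRQ, while 10 bits-per-word allows 15 * 2 = 30 bytes.
+	 */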
+	if ((mas->tx_fifo_width % mas->cur_word_len))
+		max_bytes = (mas->tx_fifo_depth - mas->tx_wm) *
+				((mas->cur_word_len / BITS_PER_BYTE) + 1);
+	else
+		max_bytes = (mas->tx_fifo_depth - mas->tx_wm) * tx_fifo_width;
+	tx_buf = mas->cur_xfer->tx_buf;
+	tx_buf += (mas->cur_xfer->len - mas->tx_rem_bytes);
+	max_bytes = min_t(int, mas->tx_rem_bytes, max_bytes);
+	while (i < max_bytes) {
+		int j;
+		u32 fifo_word = 0;
+		u8 *fifo_byte;
+		int bytes_per_fifo = tx_fifo_width;
+		int bytes_to_write = 0;
+
+		if ((mas->tx_fifo_width % mas->cur_word_len))
+			bytes_per_fifo =
+				(mas->cur_word_len / BITS_PER_BYTE) + 1;
+		bytes_to_write = min_t(int, (max_bytes - i), bytes_per_fifo);
+		fifo_byte = (u8 *)&fifo_word;
+		for (j = 0; j < bytes_to_write; j++)
+			fifo_byte[j] = tx_buf[i++];
+		geni_write_reg(fifo_word, mas->base, SE_GENI_TX_FIFOn);
+		/* Ensure FIFO writes are written in order */
+		mb();
+	}
+	mas->tx_rem_bytes -= max_bytes;
+	if (!mas->tx_rem_bytes) {
+		geni_write_reg(0, mas->base, SE_GENI_TX_WATERMARK_REG);
+		/* Barrier here before return to prevent further ISRs */
+		mb();
+	}
+}
+
+static void geni_spi_handle_rx(struct spi_geni_master *mas)
+{
+	int i = 0;
+	int fifo_width = (mas->tx_fifo_width >> 3);
+	u32 rx_fifo_status = geni_read_reg(mas->base, SE_GENI_RX_FIFO_STATUS);
+	int rx_bytes = 0;
+	int rx_wc = 0;
+	u8 *rx_buf = NULL;
+
+	if (!mas->cur_xfer)
+		return;
+
+	rx_buf = mas->cur_xfer->rx_buf;
+	rx_wc = (rx_fifo_status & RX_FIFO_WC_MSK);
+	if (rx_fifo_status & RX_LAST) {
+		int rx_last_byte_valid =
+			(rx_fifo_status & RX_LAST_BYTE_VALID_MSK)
+					>> RX_LAST_BYTE_VALID_SHFT;
+		if (rx_last_byte_valid && (rx_last_byte_valid < 4)) {
+			rx_wc -= 1;
+			rx_bytes += rx_last_byte_valid;
+		}
+	}
+	if (!(mas->tx_fifo_width % mas->cur_word_len))
+		rx_bytes += rx_wc * fifo_width;
+	else
+		rx_bytes += rx_wc *
+			((mas->cur_word_len / BITS_PER_BYTE) + 1);
+	rx_bytes = min_t(int, mas->rx_rem_bytes, rx_bytes);
+	rx_buf += (mas->cur_xfer->len - mas->rx_rem_bytes);
+	while (i < rx_bytes) {
+		u32 fifo_word = 0;
+		u8 *fifo_byte;
+		int bytes_per_fifo = fifo_width;
+		int read_bytes = 0;
+		int j;
+
+		if ((mas->tx_fifo_width % mas->cur_word_len))
+			bytes_per_fifo =
+				(mas->cur_word_len / BITS_PER_BYTE) + 1;
+		read_bytes = min_t(int, (rx_bytes - i), bytes_per_fifo);
+		fifo_word = geni_read_reg(mas->base, SE_GENI_RX_FIFOn);
+		fifo_byte = (u8 *)&fifo_word;
+		for (j = 0; j < read_bytes; j++)
+			rx_buf[i++] = fifo_byte[j];
+	}
+	mas->rx_rem_bytes -= rx_bytes;
+}
+
+static irqreturn_t geni_spi_irq(int irq, void *data)
+{
+	struct spi_geni_master *mas = data;
+	u32 m_irq = 0;
+
+	if (pm_runtime_status_suspended(mas->dev)) {
+		GENI_SE_DBG(mas->ipc, false, mas->dev,
+				"%s: device is suspended\n", __func__);
+		goto exit_geni_spi_irq;
+	}
+	m_irq = geni_read_reg(mas->base, SE_GENI_M_IRQ_STATUS);
+	if (mas->cur_xfer_mode == FIFO_MODE) {
+		if ((m_irq & M_RX_FIFO_WATERMARK_EN) ||
+						(m_irq & M_RX_FIFO_LAST_EN))
+			geni_spi_handle_rx(mas);
+
+		if ((m_irq & M_TX_FIFO_WATERMARK_EN))
+			geni_spi_handle_tx(mas);
+
+		if ((m_irq & M_CMD_DONE_EN) || (m_irq & M_CMD_CANCEL_EN) ||
+			(m_irq & M_CMD_ABORT_EN)) {
+			mas->cmd_done = true;
+			/*
+			 * If this happens, then a CMD_DONE came before all the
+			 * buffer bytes were sent out. This is unusual, log this
+			 * condition and disable the WM interrupt to prevent the
+			 * system from stalling due to an interrupt storm.
+			 * If this happens when all Rx bytes haven't been
+			 * received, log the condition.
+			 */
+			if (mas->tx_rem_bytes) {
+				geni_write_reg(0, mas->base,
+						SE_GENI_TX_WATERMARK_REG);
+				GENI_SE_DBG(mas->ipc, false, mas->dev,
+					"%s:Premature Done.tx_rem%d bpw%d\n",
+					__func__, mas->tx_rem_bytes,
+						mas->cur_word_len);
+			}
+			if (mas->rx_rem_bytes)
+				GENI_SE_DBG(mas->ipc, false, mas->dev,
+					"%s:Premature Done.rx_rem%d bpw%d\n",
+						__func__, mas->rx_rem_bytes,
+							mas->cur_word_len);
+		}
+	} else if (mas->cur_xfer_mode == SE_DMA) {
+		u32 dma_tx_status = geni_read_reg(mas->base,
+							SE_DMA_TX_IRQ_STAT);
+		u32 dma_rx_status = geni_read_reg(mas->base,
+							SE_DMA_RX_IRQ_STAT);
+
+		if (dma_tx_status)
+			geni_write_reg(dma_tx_status, mas->base,
+						SE_DMA_TX_IRQ_CLR);
+		if (dma_rx_status)
+			geni_write_reg(dma_rx_status, mas->base,
+						SE_DMA_RX_IRQ_CLR);
+		if (dma_tx_status & TX_DMA_DONE)
+			mas->tx_rem_bytes = 0;
+		if (dma_rx_status & RX_DMA_DONE)
+			mas->rx_rem_bytes = 0;
+		if (!mas->tx_rem_bytes && !mas->rx_rem_bytes)
+			mas->cmd_done = true;
+		if ((m_irq & M_CMD_CANCEL_EN) || (m_irq & M_CMD_ABORT_EN))
+			mas->cmd_done = true;
+	}
+exit_geni_spi_irq:
+	geni_write_reg(m_irq, mas->base, SE_GENI_M_IRQ_CLEAR);
+	if (mas->cmd_done) {
+		mas->cmd_done = false;
+		complete(&mas->xfer_done);
+	}
+	return IRQ_HANDLED;
+}
+
+static int spi_geni_probe(struct platform_device *pdev)
+{
+	int ret;
+	struct spi_master *spi;
+	struct spi_geni_master *geni_mas;
+	struct se_geni_rsc *rsc;
+	struct resource *res;
+	struct platform_device *wrapper_pdev;
+	struct device_node *wrapper_ph_node;
+	bool rt_pri;
+
+	spi = spi_alloc_master(&pdev->dev, sizeof(struct spi_geni_master));
+	if (!spi) {
+		ret = -ENOMEM;
+		dev_err(&pdev->dev, "Failed to alloc spi struct\n");
+		goto spi_geni_probe_err;
+	}
+
+	platform_set_drvdata(pdev, spi);
+	geni_mas = spi_master_get_devdata(spi);
+	rsc = &geni_mas->spi_rsc;
+	geni_mas->dev = &pdev->dev;
+	spi->dev.of_node = pdev->dev.of_node;
+	wrapper_ph_node = of_parse_phandle(pdev->dev.of_node,
+					"qcom,wrapper-core", 0);
+	if (IS_ERR_OR_NULL(wrapper_ph_node)) {
+		ret = PTR_ERR(wrapper_ph_node);
+		dev_err(&pdev->dev, "No wrapper core defined\n");
+		goto spi_geni_probe_err;
+	}
+	wrapper_pdev = of_find_device_by_node(wrapper_ph_node);
+	of_node_put(wrapper_ph_node);
+	if (IS_ERR_OR_NULL(wrapper_pdev)) {
+		ret = PTR_ERR(wrapper_pdev);
+		dev_err(&pdev->dev, "Cannot retrieve wrapper device\n");
+		goto spi_geni_probe_err;
+	}
+	geni_mas->wrapper_dev = &wrapper_pdev->dev;
+	geni_mas->spi_rsc.wrapper_dev = &wrapper_pdev->dev;
+	ret = geni_se_resources_init(rsc, SPI_CORE2X_VOTE,
+				     (DEFAULT_SE_CLK * DEFAULT_BUS_WIDTH));
+	if (ret) {
+		dev_err(&pdev->dev, "Error geni_se_resources_init\n");
+		goto spi_geni_probe_err;
+	}
+
+	rsc->geni_pinctrl = devm_pinctrl_get(&pdev->dev);
+	if (IS_ERR_OR_NULL(rsc->geni_pinctrl)) {
+		dev_err(&pdev->dev, "No pinctrl config specified!\n");
+		ret = PTR_ERR(rsc->geni_pinctrl);
+		goto spi_geni_probe_err;
+	}
+
+	rsc->geni_gpio_active = pinctrl_lookup_state(rsc->geni_pinctrl,
+							PINCTRL_DEFAULT);
+	if (IS_ERR_OR_NULL(rsc->geni_gpio_active)) {
+		dev_err(&pdev->dev, "No default config specified!\n");
+		ret = PTR_ERR(rsc->geni_gpio_active);
+		goto spi_geni_probe_err;
+	}
+
+	rsc->geni_gpio_sleep = pinctrl_lookup_state(rsc->geni_pinctrl,
+							PINCTRL_SLEEP);
+	if (IS_ERR_OR_NULL(rsc->geni_gpio_sleep)) {
+		dev_err(&pdev->dev, "No sleep config specified!\n");
+		ret = PTR_ERR(rsc->geni_gpio_sleep);
+		goto spi_geni_probe_err;
+	}
+
+	ret = pinctrl_select_state(rsc->geni_pinctrl,
+					rsc->geni_gpio_sleep);
+	if (ret) {
+		dev_err(&pdev->dev, "Failed to set sleep configuration\n");
+		goto spi_geni_probe_err;
+	}
+
+	rsc->se_clk = devm_clk_get(&pdev->dev, "se-clk");
+	if (IS_ERR(rsc->se_clk)) {
+		ret = PTR_ERR(rsc->se_clk);
+		dev_err(&pdev->dev, "Err getting SE Core clk %d\n", ret);
+		goto spi_geni_probe_err;
+	}
+
+	rsc->m_ahb_clk = devm_clk_get(&pdev->dev, "m-ahb");
+	if (IS_ERR(rsc->m_ahb_clk)) {
+		ret = PTR_ERR(rsc->m_ahb_clk);
+		dev_err(&pdev->dev, "Err getting M AHB clk %d\n", ret);
+		goto spi_geni_probe_err;
+	}
+
+	rsc->s_ahb_clk = devm_clk_get(&pdev->dev, "s-ahb");
+	if (IS_ERR(rsc->s_ahb_clk)) {
+		ret = PTR_ERR(rsc->s_ahb_clk);
+		dev_err(&pdev->dev, "Err getting S AHB clk %d\n", ret);
+		goto spi_geni_probe_err;
+	}
+
+	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+	if (ret) {
+		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+		if (ret) {
+			dev_err(&pdev->dev, "could not set DMA mask\n");
+			goto spi_geni_probe_err;
+		}
+	}
+
+	if (of_property_read_u32(pdev->dev.of_node, "spi-max-frequency",
+				&spi->max_speed_hz)) {
+		dev_err(&pdev->dev, "Max frequency not specified.\n");
+		ret = -ENXIO;
+		goto spi_geni_probe_err;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "se_phys");
+	if (!res) {
+		ret = -ENXIO;
+		dev_err(&pdev->dev, "Err getting IO region\n");
+		goto spi_geni_probe_err;
+	}
+
+	rt_pri = of_property_read_bool(pdev->dev.of_node, "qcom,rt");
+	if (rt_pri)
+		spi->rt = true;
+	geni_mas->dis_autosuspend =
+		of_property_read_bool(pdev->dev.of_node,
+				"qcom,disable-autosuspend");
+	geni_mas->phys_addr = res->start;
+	geni_mas->size = resource_size(res);
+	geni_mas->base = devm_ioremap(&pdev->dev, res->start,
+						resource_size(res));
+	if (!geni_mas->base) {
+		ret = -ENOMEM;
+		dev_err(&pdev->dev, "Err IO mapping iomem\n");
+		goto spi_geni_probe_err;
+	}
+
+	geni_mas->irq = platform_get_irq(pdev, 0);
+	if (geni_mas->irq < 0) {
+		dev_err(&pdev->dev, "Err getting IRQ\n");
+		ret = geni_mas->irq;
+		goto spi_geni_probe_unmap;
+	}
+	ret = devm_request_irq(&pdev->dev, geni_mas->irq, geni_spi_irq,
+			       IRQF_TRIGGER_HIGH, "spi_geni", geni_mas);
+	if (ret) {
+		dev_err(&pdev->dev, "Request_irq failed:%d: err:%d\n",
+				   geni_mas->irq, ret);
+		goto spi_geni_probe_unmap;
+	}
+
+	spi->mode_bits = (SPI_CPOL | SPI_CPHA | SPI_LOOP | SPI_CS_HIGH);
+	spi->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
+	spi->num_chipselect = SPI_NUM_CHIPSELECT;
+	spi->prepare_transfer_hardware = spi_geni_prepare_transfer_hardware;
+	spi->prepare_message = spi_geni_prepare_message;
+	spi->unprepare_message = spi_geni_unprepare_message;
+	spi->transfer_one = spi_geni_transfer_one;
+	spi->unprepare_transfer_hardware
+			= spi_geni_unprepare_transfer_hardware;
+	spi->auto_runtime_pm = false;
+
+	init_completion(&geni_mas->xfer_done);
+	init_completion(&geni_mas->tx_cb);
+	init_completion(&geni_mas->rx_cb);
+	pm_runtime_set_suspended(&pdev->dev);
+	if (!geni_mas->dis_autosuspend) {
+		pm_runtime_set_autosuspend_delay(&pdev->dev,
+					SPI_AUTO_SUSPEND_DELAY);
+		pm_runtime_use_autosuspend(&pdev->dev);
+	}
+	pm_runtime_enable(&pdev->dev);
+	ret = spi_register_master(spi);
+	if (ret) {
+		dev_err(&pdev->dev, "Failed to register SPI master\n");
+		goto spi_geni_probe_unmap;
+	}
+	return ret;
+spi_geni_probe_unmap:
+	devm_iounmap(&pdev->dev, geni_mas->base);
+spi_geni_probe_err:
+	spi_master_put(spi);
+	return ret;
+}
+
+static int spi_geni_remove(struct platform_device *pdev)
+{
+	struct spi_master *master = platform_get_drvdata(pdev);
+	struct spi_geni_master *geni_mas = spi_master_get_devdata(master);
+
+	spi_unregister_master(master);
+	se_geni_resources_off(&geni_mas->spi_rsc);
+	pm_runtime_put_noidle(&pdev->dev);
+	pm_runtime_disable(&pdev->dev);
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int spi_geni_runtime_suspend(struct device *dev)
+{
+	int ret = 0;
+	struct spi_master *spi = get_spi_master(dev);
+	struct spi_geni_master *geni_mas = spi_master_get_devdata(spi);
+
+	if (geni_mas->shared_se) {
+		ret = se_geni_clks_off(&geni_mas->spi_rsc);
+		if (ret)
+			GENI_SE_ERR(geni_mas->ipc, false, NULL,
+			"%s: Error %d turning off clocks\n", __func__, ret);
+	} else {
+		ret = se_geni_resources_off(&geni_mas->spi_rsc);
+	}
+	return ret;
+}
+
+static int spi_geni_runtime_resume(struct device *dev)
+{
+	int ret = 0;
+	struct spi_master *spi = get_spi_master(dev);
+	struct spi_geni_master *geni_mas = spi_master_get_devdata(spi);
+
+	if (geni_mas->shared_se) {
+		ret = se_geni_clks_on(&geni_mas->spi_rsc);
+		if (ret)
+			GENI_SE_ERR(geni_mas->ipc, false, NULL,
+			"%s: Error %d turning on clocks\n", __func__, ret);
+	} else {
+		ret = se_geni_resources_on(&geni_mas->spi_rsc);
+	}
+	return ret;
+}
+
+static int spi_geni_resume(struct device *dev)
+{
+	return 0;
+}
+
+static int spi_geni_suspend(struct device *dev)
+{
+	int ret = 0;
+
+	if (!pm_runtime_status_suspended(dev)) {
+		struct spi_master *spi = get_spi_master(dev);
+		struct spi_geni_master *geni_mas = spi_master_get_devdata(spi);
+
+		if (list_empty(&spi->queue) && !spi->cur_msg) {
+			GENI_SE_ERR(geni_mas->ipc, true, dev,
+					"%s: Force suspend", __func__);
+			ret = spi_geni_runtime_suspend(dev);
+			if (ret) {
+				GENI_SE_ERR(geni_mas->ipc, true, dev,
+					"Force suspend Failed:%d", ret);
+			} else {
+				pm_runtime_disable(dev);
+				pm_runtime_set_suspended(dev);
+				pm_runtime_enable(dev);
+			}
+		} else {
+			ret = -EBUSY;
+		}
+	}
+	return ret;
+}
+#else
+static int spi_geni_runtime_suspend(struct device *dev)
+{
+	return 0;
+}
+
+static int spi_geni_runtime_resume(struct device *dev)
+{
+	return 0;
+}
+
+static int spi_geni_resume(struct device *dev)
+{
+	return 0;
+}
+
+static int spi_geni_suspend(struct device *dev)
+{
+	return 0;
+}
+#endif
+
+static const struct dev_pm_ops spi_geni_pm_ops = {
+	SET_RUNTIME_PM_OPS(spi_geni_runtime_suspend,
+					spi_geni_runtime_resume, NULL)
+	SET_SYSTEM_SLEEP_PM_OPS(spi_geni_suspend, spi_geni_resume)
+};
+
+static const struct of_device_id spi_geni_dt_match[] = {
+	{ .compatible = "qcom,spi-geni" },
+	{}
+};
+
+static struct platform_driver spi_geni_driver = {
+	.probe  = spi_geni_probe,
+	.remove = spi_geni_remove,
+	.driver = {
+		.name = "spi_geni",
+		.pm = &spi_geni_pm_ops,
+		.of_match_table = spi_geni_dt_match,
+	},
+};
+module_platform_driver(spi_geni_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:spi_geni");
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index 508c61c..e2be7da 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -1455,13 +1455,26 @@ static int omap2_mcspi_remove(struct platform_device *pdev)
 /* work with hotplug and coldplug */
 MODULE_ALIAS("platform:omap2_mcspi");
 
-#ifdef	CONFIG_SUSPEND
-static int omap2_mcspi_suspend_noirq(struct device *dev)
+static int __maybe_unused omap2_mcspi_suspend(struct device *dev)
 {
-	return pinctrl_pm_select_sleep_state(dev);
+	struct spi_master *master = dev_get_drvdata(dev);
+	struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
+	int error;
+
+	error = pinctrl_pm_select_sleep_state(dev);
+	if (error)
+		dev_warn(mcspi->dev, "%s: failed to set pins: %i\n",
+			 __func__, error);
+
+	error = spi_master_suspend(master);
+	if (error)
+		dev_warn(mcspi->dev, "%s: master suspend failed: %i\n",
+			 __func__, error);
+
+	return pm_runtime_force_suspend(dev);
 }
 
-static int omap2_mcspi_resume_noirq(struct device *dev)
+static int __maybe_unused omap2_mcspi_resume(struct device *dev)
 {
 	struct spi_master *master = dev_get_drvdata(dev);
 	struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
@@ -1472,17 +1485,17 @@ static int omap2_mcspi_resume_noirq(struct device *dev)
 		dev_warn(mcspi->dev, "%s: failed to set pins: %i\n",
 			 __func__, error);
 
-	return 0;
+	error = spi_master_resume(master);
+	if (error)
+		dev_warn(mcspi->dev, "%s: master resume failed: %i\n",
+			 __func__, error);
+
+	return pm_runtime_force_resume(dev);
 }
 
-#else
-#define omap2_mcspi_suspend_noirq	NULL
-#define omap2_mcspi_resume_noirq	NULL
-#endif
-
 static const struct dev_pm_ops omap2_mcspi_pm_ops = {
-	.suspend_noirq = omap2_mcspi_suspend_noirq,
-	.resume_noirq = omap2_mcspi_resume_noirq,
+	SET_SYSTEM_SLEEP_PM_OPS(omap2_mcspi_suspend,
+				omap2_mcspi_resume)
 	.runtime_resume	= omap_mcspi_runtime_resume,
 };
 
diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
index c38298d..4f120e7 100644
--- a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
+++ b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
@@ -2289,7 +2289,7 @@ static int rtw_wx_read32(struct net_device *dev,
 exit:
 	kfree(ptmp);
 
-	return 0;
+	return ret;
 }
 
 static int rtw_wx_write32(struct net_device *dev,
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index 7e13ab6..3e312c9 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -31,6 +31,7 @@
 #include <linux/slab.h>
 #include <linux/cpu.h>
 #include <linux/cpu_cooling.h>
+#include <linux/of_device.h>
 
 #include <trace/events/thermal.h>
 
@@ -80,6 +81,10 @@ struct time_in_idle {
  *	cooling	devices.
  * @clipped_freq: integer value representing the absolute value of the clipped
  *	frequency.
+ * @cpufreq_floor_state: integer value representing the frequency floor state
+ *	of cpufreq cooling devices.
+ * @floor_freq: integer value representing the absolute value of the floor
+ *	frequency.
  * @max_level: maximum cooling level. One less than total number of valid
  *	cpufreq frequencies.
  * @freq_table: Freq table in descending order of frequencies
@@ -97,12 +102,15 @@ struct cpufreq_cooling_device {
 	u32 last_load;
 	unsigned int cpufreq_state;
 	unsigned int clipped_freq;
+	unsigned int cpufreq_floor_state;
+	unsigned int floor_freq;
 	unsigned int max_level;
 	struct freq_table *freq_table;	/* In descending order */
 	struct thermal_cooling_device *cdev;
 	struct cpufreq_policy *policy;
 	struct list_head node;
 	struct time_in_idle *idle_time;
+	struct cpu_cooling_ops *plat_ops;
 };
 
 static DEFINE_IDA(cpufreq_ida);
@@ -165,7 +173,7 @@ static int cpufreq_thermal_notifier(struct notifier_block *nb,
 				    unsigned long event, void *data)
 {
 	struct cpufreq_policy *policy = data;
-	unsigned long clipped_freq;
+	unsigned long clipped_freq = ULONG_MAX, floor_freq = 0;
 	struct cpufreq_cooling_device *cpufreq_cdev;
 
 	if (event != CPUFREQ_ADJUST)
@@ -190,13 +198,18 @@ static int cpufreq_thermal_notifier(struct notifier_block *nb,
 		 *
 		 * But, if clipped_freq is greater than policy->max, we don't
 		 * need to do anything.
+		 *
+		 * Similarly, if policy minimum set by the user is less than
+		 * the floor_frequency, then adjust the policy->min.
 		 */
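+		/*
+		 * e.g. with floor_freq = 300000 kHz and clipped_freq =
+		 * 1500000 kHz, a user policy of [200000, 2000000] kHz is
+		 * clamped to [300000, 1500000] kHz.
+		 */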
 		clipped_freq = cpufreq_cdev->clipped_freq;
-
-		if (policy->max > clipped_freq)
-			cpufreq_verify_within_limits(policy, 0, clipped_freq);
+		floor_freq = cpufreq_cdev->floor_freq;
+		if (policy->max > clipped_freq || policy->min < floor_freq)
+			cpufreq_verify_within_limits(policy, floor_freq,
+							clipped_freq);
 		break;
 	}
+
 	mutex_unlock(&cooling_list_lock);
 
 	return NOTIFY_OK;
@@ -373,6 +386,67 @@ static int cpufreq_get_max_state(struct thermal_cooling_device *cdev,
 }
 
 /**
+ * cpufreq_get_min_state - callback function to get the device floor state.
+ * @cdev: thermal cooling device pointer.
+ * @state: fill this variable with the cooling device floor.
+ *
+ * Callback for the thermal cooling device to return the cpufreq
+ * floor state.
+ *
+ * Return: 0 on success, an error code otherwise.
+ */
+static int cpufreq_get_min_state(struct thermal_cooling_device *cdev,
+				 unsigned long *state)
+{
+	struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
+
+	*state = cpufreq_cdev->cpufreq_floor_state;
+
+	return 0;
+}
+
+/**
+ * cpufreq_set_min_state - callback function to set the device floor state.
+ * @cdev: thermal cooling device pointer.
+ * @state: set this variable to the current cooling state.
+ *
+ * Callback for the thermal cooling device to change the cpufreq
+ * floor state.
+ *
+ * Return: 0 on success, an error code otherwise.
+ */
+static int cpufreq_set_min_state(struct thermal_cooling_device *cdev,
+				 unsigned long state)
+{
+	struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
+	unsigned int cpu = cpufreq_cdev->policy->cpu;
+	unsigned int floor_freq;
+
+	if (state > cpufreq_cdev->max_level)
+		state = cpufreq_cdev->max_level;
+
+	if (cpufreq_cdev->cpufreq_floor_state == state)
+		return 0;
+
+	cpufreq_cdev->cpufreq_floor_state = state;
+	floor_freq = cpufreq_cdev->freq_table[state].frequency;
+	cpufreq_cdev->floor_freq = floor_freq;
+
+	/*
+	 * Check if the device has a platform mitigation function that
+	 * can handle the CPU freq mitigation; if not, notify the
+	 * cpufreq framework.
+	 */
+	if (cpufreq_cdev->plat_ops &&
+		cpufreq_cdev->plat_ops->floor_limit)
+		cpufreq_cdev->plat_ops->floor_limit(cpu, floor_freq);
+	else
+		cpufreq_update_policy(cpu);
+
+	return 0;
+}
+
+/**
  * cpufreq_get_cur_state - callback function to get the current cooling state.
  * @cdev: thermal cooling device pointer.
  * @state: fill this variable with the current cooling state.
@@ -420,7 +494,16 @@ static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev,
 	cpufreq_cdev->cpufreq_state = state;
 	cpufreq_cdev->clipped_freq = clip_freq;
 
-	cpufreq_update_policy(cpufreq_cdev->policy->cpu);
+	/*
+	 * Check if the device has a platform mitigation function that
+	 * can handle the CPU freq mitigation; if not, notify the
+	 * cpufreq framework.
+	 */
+	if (cpufreq_cdev->plat_ops &&
+		cpufreq_cdev->plat_ops->ceil_limit)
+		cpufreq_cdev->plat_ops->ceil_limit(cpufreq_cdev->policy->cpu,
+							clip_freq);
+	else
+		cpufreq_update_policy(cpufreq_cdev->policy->cpu);
 
 	return 0;
 }
@@ -577,6 +660,8 @@ static struct thermal_cooling_device_ops cpufreq_cooling_ops = {
 	.get_max_state = cpufreq_get_max_state,
 	.get_cur_state = cpufreq_get_cur_state,
 	.set_cur_state = cpufreq_set_cur_state,
+	.set_min_state = cpufreq_set_min_state,
+	.get_min_state = cpufreq_get_min_state,
 };
 
 static struct thermal_cooling_device_ops cpufreq_power_cooling_ops = {
@@ -613,6 +698,9 @@ static unsigned int find_next_max(struct cpufreq_frequency_table *table,
  * @policy: cpufreq policy
  * Normally this should be same as cpufreq policy->related_cpus.
  * @capacitance: dynamic power coefficient for these cpus
+ * @plat_ops: function that does the mitigation by changing the
+ *	frequencies (Optional). By default, the cpufreq framework
+ *	will be notified of the new limits.
  *
  * This interface function registers the cpufreq cooling device with the name
  * "thermal-cpufreq-%x". This api can support multiple instances of cpufreq
@@ -624,7 +712,8 @@ static unsigned int find_next_max(struct cpufreq_frequency_table *table,
  */
 static struct thermal_cooling_device *
 __cpufreq_cooling_register(struct device_node *np,
-			struct cpufreq_policy *policy, u32 capacitance)
+			struct cpufreq_policy *policy, u32 capacitance,
+			struct cpu_cooling_ops *plat_ops)
 {
 	struct thermal_cooling_device *cdev;
 	struct cpufreq_cooling_device *cpufreq_cdev;
@@ -705,12 +794,17 @@ __cpufreq_cooling_register(struct device_node *np,
 		cooling_ops = &cpufreq_cooling_ops;
 	}
 
+	cpufreq_cdev->plat_ops = plat_ops;
+
 	cdev = thermal_of_cooling_device_register(np, dev_name, cpufreq_cdev,
 						  cooling_ops);
 	if (IS_ERR(cdev))
 		goto remove_ida;
 
 	cpufreq_cdev->clipped_freq = cpufreq_cdev->freq_table[0].frequency;
+	cpufreq_cdev->floor_freq =
+		cpufreq_cdev->freq_table[cpufreq_cdev->max_level].frequency;
+	cpufreq_cdev->cpufreq_floor_state = cpufreq_cdev->max_level;
 	cpufreq_cdev->cdev = cdev;
 
 	mutex_lock(&cooling_list_lock);
@@ -719,7 +813,7 @@ __cpufreq_cooling_register(struct device_node *np,
 	list_add(&cpufreq_cdev->node, &cpufreq_cdev_list);
 	mutex_unlock(&cooling_list_lock);
 
-	if (first)
+	if (first && !cpufreq_cdev->plat_ops)
 		cpufreq_register_notifier(&thermal_cpufreq_notifier_block,
 					  CPUFREQ_POLICY_NOTIFIER);
 
@@ -750,7 +844,7 @@ __cpufreq_cooling_register(struct device_node *np,
 struct thermal_cooling_device *
 cpufreq_cooling_register(struct cpufreq_policy *policy)
 {
-	return __cpufreq_cooling_register(NULL, policy, 0);
+	return __cpufreq_cooling_register(NULL, policy, 0, NULL);
 }
 EXPORT_SYMBOL_GPL(cpufreq_cooling_register);
 
@@ -790,7 +884,8 @@ of_cpufreq_cooling_register(struct cpufreq_policy *policy)
 		of_property_read_u32(np, "dynamic-power-coefficient",
 				     &capacitance);
 
-		cdev = __cpufreq_cooling_register(np, policy, capacitance);
+		cdev = __cpufreq_cooling_register(np, policy, capacitance,
+						NULL);
 		if (IS_ERR(cdev)) {
 			pr_err("cpu_cooling: cpu%d is not running as cooling device: %ld\n",
 			       policy->cpu, PTR_ERR(cdev));
@@ -804,6 +899,47 @@ of_cpufreq_cooling_register(struct cpufreq_policy *policy)
 EXPORT_SYMBOL_GPL(of_cpufreq_cooling_register);
 
 /**
+ * cpufreq_platform_cooling_register() - create cpufreq cooling device with
+ * additional platform specific mitigation function.
+ *
+ * @policy: cpufreq policy
+ * @plat_ops: the platform mitigation functions that will be called instead
+ * of notifying the cpufreq framework, if provided.
+ *
+ * Return: a valid struct thermal_cooling_device pointer on success,
+ * on failure, it returns a corresponding ERR_PTR().
+ */
+struct thermal_cooling_device *
+cpufreq_platform_cooling_register(struct cpufreq_policy *policy,
+				struct cpu_cooling_ops *plat_ops)
+{
+	struct device_node *cpu_node = NULL;
+	u32 capacitance = 0;
+	struct thermal_cooling_device *cdev = NULL;
+
+	cpu_node = of_cpu_device_node_get(policy->cpu);
+	if (!cpu_node) {
+		pr_err("No cpu node\n");
+		return ERR_PTR(-EINVAL);
+	}
+	if (of_find_property(cpu_node, "#cooling-cells", NULL)) {
+		of_property_read_u32(cpu_node, "dynamic-power-coefficient",
+				     &capacitance);
+
+		cdev = __cpufreq_cooling_register(cpu_node, policy, capacitance,
+							plat_ops);
+		if (IS_ERR(cdev))
+			pr_err("cpu_cooling: cpu%d cooling device err: %ld\n",
+			       policy->cpu, PTR_ERR(cdev));
+	}
+
+	of_node_put(cpu_node);
+	return cdev;
+}
+EXPORT_SYMBOL(cpufreq_platform_cooling_register);
+
+/**
  * cpufreq_cooling_unregister - function to remove cpufreq cooling device.
  * @cdev: thermal cooling device pointer.
  *
@@ -825,9 +961,12 @@ void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
 	last = list_empty(&cpufreq_cdev_list);
 	mutex_unlock(&cooling_list_lock);
 
-	if (last)
-		cpufreq_unregister_notifier(&thermal_cpufreq_notifier_block,
-					    CPUFREQ_POLICY_NOTIFIER);
+	if (last) {
+		if (!cpufreq_cdev->plat_ops)
+			cpufreq_unregister_notifier(
+					&thermal_cpufreq_notifier_block,
+					CPUFREQ_POLICY_NOTIFIER);
+	}
 
 	thermal_cooling_device_unregister(cpufreq_cdev->cdev);
 	ida_simple_remove(&cpufreq_ida, cpufreq_cdev->id);
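
A minimal usage sketch of the new registration path (the driver-side
names and callbacks here are hypothetical; only struct cpu_cooling_ops
and cpufreq_platform_cooling_register() come from this patch):

	static int plat_ceil_limit(int cpu, u32 clip_freq)
	{
		/* apply a platform-specific maximum-frequency cap on @cpu */
		return 0;
	}

	static int plat_floor_limit(int cpu, u32 floor_freq)
	{
		/* apply a platform-specific minimum-frequency floor on @cpu */
		return 0;
	}

	static struct cpu_cooling_ops plat_ops = {
		.ceil_limit = plat_ceil_limit,
		.floor_limit = plat_floor_limit,
	};

	static int plat_thermal_init(struct cpufreq_policy *policy)
	{
		struct thermal_cooling_device *cdev;

		/* Registration skips the cpufreq policy notifier;
		 * set_cur_state (ceiling) and set_min_state (floor)
		 * requests are routed to the callbacks above instead
		 * of cpufreq_update_policy().
		 */
		cdev = cpufreq_platform_cooling_register(policy, &plat_ops);
		if (IS_ERR_OR_NULL(cdev))
			return cdev ? PTR_ERR(cdev) : -ENODEV;
		return 0;
	}
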
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index 7442bc4..dd9ae6f 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -864,6 +864,30 @@ static ssize_t key_store(struct device *dev, struct device_attribute *attr,
 }
 static DEVICE_ATTR(key, 0600, key_show, key_store);
 
+static void nvm_authenticate_start(struct tb_switch *sw)
+{
+	struct pci_dev *root_port;
+
+	/*
+	 * During host router NVM upgrade we should not allow root port to
+	 * go into D3cold because some root ports cannot trigger PME
+	 * itself. To be on the safe side keep the root port in D0 during
+	 * the whole upgrade process.
+	 */
+	root_port = pci_find_pcie_root_port(sw->tb->nhi->pdev);
+	if (root_port)
+		pm_runtime_get_noresume(&root_port->dev);
+}
+
+static void nvm_authenticate_complete(struct tb_switch *sw)
+{
+	struct pci_dev *root_port;
+
+	root_port = pci_find_pcie_root_port(sw->tb->nhi->pdev);
+	if (root_port)
+		pm_runtime_put(&root_port->dev);
+}
+
 static ssize_t nvm_authenticate_show(struct device *dev,
 	struct device_attribute *attr, char *buf)
 {
@@ -913,10 +937,18 @@ static ssize_t nvm_authenticate_store(struct device *dev,
 
 		sw->nvm->authenticating = true;
 
-		if (!tb_route(sw))
+		if (!tb_route(sw)) {
+			/*
+			 * Keep root port from suspending as long as the
+			 * NVM upgrade process is running.
+			 */
+			nvm_authenticate_start(sw);
 			ret = nvm_authenticate_host(sw);
-		else
+			if (ret)
+				nvm_authenticate_complete(sw);
+		} else {
 			ret = nvm_authenticate_device(sw);
+		}
 		pm_runtime_mark_last_busy(&sw->dev);
 		pm_runtime_put_autosuspend(&sw->dev);
 	}
@@ -1336,6 +1368,10 @@ static int tb_switch_add_dma_port(struct tb_switch *sw)
 	if (ret <= 0)
 		return ret;
 
+	/* Now we can allow root port to suspend again */
+	if (!tb_route(sw))
+		nvm_authenticate_complete(sw);
+
 	if (status) {
 		tb_sw_info(sw, "switch flash authentication failed\n");
 		tb_switch_set_uuid(sw);
diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
index 83b577b..d016907 100644
--- a/drivers/usb/gadget/function/u_ether.c
+++ b/drivers/usb/gadget/function/u_ether.c
@@ -406,12 +406,12 @@ static int alloc_requests(struct eth_dev *dev, struct gether *link, unsigned n)
 static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags)
 {
 	struct usb_request	*req;
-	struct usb_request	*tmp;
 	unsigned long		flags;
 
 	/* fill unused rxq slots with some skb */
 	spin_lock_irqsave(&dev->req_lock, flags);
-	list_for_each_entry_safe(req, tmp, &dev->rx_reqs, list) {
+	while (!list_empty(&dev->rx_reqs)) {
+		req = list_first_entry(&dev->rx_reqs, struct usb_request, list);
 		list_del_init(&req->list);
 		spin_unlock_irqrestore(&dev->req_lock, flags);
 
@@ -1152,7 +1152,6 @@ void gether_disconnect(struct gether *link)
 {
 	struct eth_dev		*dev = link->ioport;
 	struct usb_request	*req;
-	struct usb_request	*tmp;
 
 	WARN_ON(!dev);
 	if (!dev)
@@ -1169,7 +1168,8 @@ void gether_disconnect(struct gether *link)
 	 */
 	usb_ep_disable(link->in_ep);
 	spin_lock(&dev->req_lock);
-	list_for_each_entry_safe(req, tmp, &dev->tx_reqs, list) {
+	while (!list_empty(&dev->tx_reqs)) {
+		req = list_first_entry(&dev->tx_reqs, struct usb_request, list);
 		list_del(&req->list);
 
 		spin_unlock(&dev->req_lock);
@@ -1181,7 +1181,8 @@ void gether_disconnect(struct gether *link)
 
 	usb_ep_disable(link->out_ep);
 	spin_lock(&dev->req_lock);
-	list_for_each_entry_safe(req, tmp, &dev->rx_reqs, list) {
+	while (!list_empty(&dev->rx_reqs)) {
+		req = list_first_entry(&dev->rx_reqs, struct usb_request, list);
 		list_del(&req->list);
 
 		spin_unlock(&dev->req_lock);
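
The iterator change above matters because the lock is dropped inside the
loop body: list_for_each_entry_safe() caches a next pointer that another
thread may free or move while the lock is released. A sketch of the
resulting drain pattern (mirroring gether_disconnect(); error handling
elided):

	spin_lock(&dev->req_lock);
	while (!list_empty(&dev->rx_reqs)) {
		req = list_first_entry(&dev->rx_reqs, struct usb_request,
				       list);
		list_del(&req->list);

		/* the list may be modified by others once this drops */
		spin_unlock(&dev->req_lock);
		usb_ep_free_request(link->out_ep, req);
		spin_lock(&dev->req_lock);
	}
	spin_unlock(&dev->req_lock);
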
diff --git a/drivers/usb/gadget/udc/omap_udc.c b/drivers/usb/gadget/udc/omap_udc.c
index 3a16431..fcf13ef 100644
--- a/drivers/usb/gadget/udc/omap_udc.c
+++ b/drivers/usb/gadget/udc/omap_udc.c
@@ -2033,6 +2033,7 @@ static inline int machine_without_vbus_sense(void)
 {
 	return machine_is_omap_innovator()
 		|| machine_is_omap_osk()
+		|| machine_is_omap_palmte()
 		|| machine_is_sx1()
 		/* No known omap7xx boards with vbus sense */
 		|| cpu_is_omap7xx();
@@ -2041,7 +2042,7 @@ static inline int machine_without_vbus_sense(void)
 static int omap_udc_start(struct usb_gadget *g,
 		struct usb_gadget_driver *driver)
 {
-	int		status = -ENODEV;
+	int		status;
 	struct omap_ep	*ep;
 	unsigned long	flags;
 
@@ -2079,6 +2080,7 @@ static int omap_udc_start(struct usb_gadget *g,
 			goto done;
 		}
 	} else {
+		status = 0;
 		if (can_pullup(udc))
 			pullup_enable(udc);
 		else
@@ -2593,9 +2595,22 @@ omap_ep_setup(char *name, u8 addr, u8 type,
 
 static void omap_udc_release(struct device *dev)
 {
-	complete(udc->done);
+	pullup_disable(udc);
+	if (!IS_ERR_OR_NULL(udc->transceiver)) {
+		usb_put_phy(udc->transceiver);
+		udc->transceiver = NULL;
+	}
+	omap_writew(0, UDC_SYSCON1);
+	remove_proc_file();
+	if (udc->dc_clk) {
+		if (udc->clk_requested)
+			omap_udc_enable_clock(0);
+		clk_put(udc->hhc_clk);
+		clk_put(udc->dc_clk);
+	}
+	if (udc->done)
+		complete(udc->done);
 	kfree(udc);
-	udc = NULL;
 }
 
 static int
@@ -2627,6 +2642,7 @@ omap_udc_setup(struct platform_device *odev, struct usb_phy *xceiv)
 	udc->gadget.speed = USB_SPEED_UNKNOWN;
 	udc->gadget.max_speed = USB_SPEED_FULL;
 	udc->gadget.name = driver_name;
+	udc->gadget.quirk_ep_out_aligned_size = 1;
 	udc->transceiver = xceiv;
 
 	/* ep0 is special; put it right after the SETUP buffer */
@@ -2867,8 +2883,8 @@ static int omap_udc_probe(struct platform_device *pdev)
 		udc->clr_halt = UDC_RESET_EP;
 
 	/* USB general purpose IRQ:  ep0, state changes, dma, etc */
-	status = request_irq(pdev->resource[1].start, omap_udc_irq,
-			0, driver_name, udc);
+	status = devm_request_irq(&pdev->dev, pdev->resource[1].start,
+				  omap_udc_irq, 0, driver_name, udc);
 	if (status != 0) {
 		ERR("can't get irq %d, err %d\n",
 			(int) pdev->resource[1].start, status);
@@ -2876,20 +2892,20 @@ static int omap_udc_probe(struct platform_device *pdev)
 	}
 
 	/* USB "non-iso" IRQ (PIO for all but ep0) */
-	status = request_irq(pdev->resource[2].start, omap_udc_pio_irq,
-			0, "omap_udc pio", udc);
+	status = devm_request_irq(&pdev->dev, pdev->resource[2].start,
+				  omap_udc_pio_irq, 0, "omap_udc pio", udc);
 	if (status != 0) {
 		ERR("can't get irq %d, err %d\n",
 			(int) pdev->resource[2].start, status);
-		goto cleanup2;
+		goto cleanup1;
 	}
 #ifdef	USE_ISO
-	status = request_irq(pdev->resource[3].start, omap_udc_iso_irq,
-			0, "omap_udc iso", udc);
+	status = devm_request_irq(&pdev->dev, pdev->resource[3].start,
+				  omap_udc_iso_irq, 0, "omap_udc iso", udc);
 	if (status != 0) {
 		ERR("can't get irq %d, err %d\n",
 			(int) pdev->resource[3].start, status);
-		goto cleanup3;
+		goto cleanup1;
 	}
 #endif
 	if (cpu_is_omap16xx() || cpu_is_omap7xx()) {
@@ -2900,23 +2916,8 @@ static int omap_udc_probe(struct platform_device *pdev)
 	}
 
 	create_proc_file();
-	status = usb_add_gadget_udc_release(&pdev->dev, &udc->gadget,
-			omap_udc_release);
-	if (status)
-		goto cleanup4;
-
-	return 0;
-
-cleanup4:
-	remove_proc_file();
-
-#ifdef	USE_ISO
-cleanup3:
-	free_irq(pdev->resource[2].start, udc);
-#endif
-
-cleanup2:
-	free_irq(pdev->resource[1].start, udc);
+	return usb_add_gadget_udc_release(&pdev->dev, &udc->gadget,
+					  omap_udc_release);
 
 cleanup1:
 	kfree(udc);
@@ -2943,42 +2944,15 @@ static int omap_udc_remove(struct platform_device *pdev)
 {
 	DECLARE_COMPLETION_ONSTACK(done);
 
-	if (!udc)
-		return -ENODEV;
-
-	usb_del_gadget_udc(&udc->gadget);
-	if (udc->driver)
-		return -EBUSY;
-
 	udc->done = &done;
 
-	pullup_disable(udc);
-	if (!IS_ERR_OR_NULL(udc->transceiver)) {
-		usb_put_phy(udc->transceiver);
-		udc->transceiver = NULL;
-	}
-	omap_writew(0, UDC_SYSCON1);
+	usb_del_gadget_udc(&udc->gadget);
 
-	remove_proc_file();
-
-#ifdef	USE_ISO
-	free_irq(pdev->resource[3].start, udc);
-#endif
-	free_irq(pdev->resource[2].start, udc);
-	free_irq(pdev->resource[1].start, udc);
-
-	if (udc->dc_clk) {
-		if (udc->clk_requested)
-			omap_udc_enable_clock(0);
-		clk_put(udc->hhc_clk);
-		clk_put(udc->dc_clk);
-	}
+	wait_for_completion(&done);
 
 	release_mem_region(pdev->resource[0].start,
 			pdev->resource[0].end - pdev->resource[0].start + 1);
 
-	wait_for_completion(&done);
-
 	return 0;
 }
 
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index e12bb25..7ab6cae 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -251,25 +251,10 @@ static void release_memory_resource(struct resource *resource)
 	kfree(resource);
 }
 
-/*
- * Host memory not allocated to dom0. We can use this range for hotplug-based
- * ballooning.
- *
- * It's a type-less resource. Setting IORESOURCE_MEM will make resource
- * management algorithms (arch_remove_reservations()) look into guest e820,
- * which we don't want.
- */
-static struct resource hostmem_resource = {
-	.name   = "Host RAM",
-};
-
-void __attribute__((weak)) __init arch_xen_balloon_init(struct resource *res)
-{}
-
 static struct resource *additional_memory_resource(phys_addr_t size)
 {
-	struct resource *res, *res_hostmem;
-	int ret = -ENOMEM;
+	struct resource *res;
+	int ret;
 
 	res = kzalloc(sizeof(*res), GFP_KERNEL);
 	if (!res)
@@ -278,42 +263,13 @@ static struct resource *additional_memory_resource(phys_addr_t size)
 	res->name = "System RAM";
 	res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
 
-	res_hostmem = kzalloc(sizeof(*res), GFP_KERNEL);
-	if (res_hostmem) {
-		/* Try to grab a range from hostmem */
-		res_hostmem->name = "Host memory";
-		ret = allocate_resource(&hostmem_resource, res_hostmem,
-					size, 0, -1,
-					PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL);
-	}
-
-	if (!ret) {
-		/*
-		 * Insert this resource into iomem. Because hostmem_resource
-		 * tracks portion of guest e820 marked as UNUSABLE noone else
-		 * should try to use it.
-		 */
-		res->start = res_hostmem->start;
-		res->end = res_hostmem->end;
-		ret = insert_resource(&iomem_resource, res);
-		if (ret < 0) {
-			pr_err("Can't insert iomem_resource [%llx - %llx]\n",
-				res->start, res->end);
-			release_memory_resource(res_hostmem);
-			res_hostmem = NULL;
-			res->start = res->end = 0;
-		}
-	}
-
-	if (ret) {
-		ret = allocate_resource(&iomem_resource, res,
-					size, 0, -1,
-					PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL);
-		if (ret < 0) {
-			pr_err("Cannot allocate new System RAM resource\n");
-			kfree(res);
-			return NULL;
-		}
+	ret = allocate_resource(&iomem_resource, res,
+				size, 0, -1,
+				PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL);
+	if (ret < 0) {
+		pr_err("Cannot allocate new System RAM resource\n");
+		kfree(res);
+		return NULL;
 	}
 
 #ifdef CONFIG_SPARSEMEM
@@ -325,7 +281,6 @@ static struct resource *additional_memory_resource(phys_addr_t size)
 			pr_err("New System RAM resource outside addressable RAM (%lu > %lu)\n",
 			       pfn, limit);
 			release_memory_resource(res);
-			release_memory_resource(res_hostmem);
 			return NULL;
 		}
 	}
@@ -747,8 +702,6 @@ static int __init balloon_init(void)
 	set_online_page_callback(&xen_online_page);
 	register_memory_notifier(&xen_memory_nb);
 	register_sysctl_table(xen_root);
-
-	arch_xen_balloon_init(&hostmem_resource);
 #endif
 
 #ifdef CONFIG_XEN_PV
diff --git a/drivers/xen/pvcalls-front.c b/drivers/xen/pvcalls-front.c
index 2f11ca7..77224d8 100644
--- a/drivers/xen/pvcalls-front.c
+++ b/drivers/xen/pvcalls-front.c
@@ -385,8 +385,8 @@ static int create_active(struct sock_mapping *map, int *evtchn)
 out_error:
 	if (*evtchn >= 0)
 		xenbus_free_evtchn(pvcalls_front_dev, *evtchn);
-	kfree(map->active.data.in);
-	kfree(map->active.ring);
+	free_pages((unsigned long)map->active.data.in, PVCALLS_RING_ORDER);
+	free_page((unsigned long)map->active.ring);
 	return ret;
 }
 
diff --git a/drivers/xen/xlate_mmu.c b/drivers/xen/xlate_mmu.c
index 23f1387..e7df65d 100644
--- a/drivers/xen/xlate_mmu.c
+++ b/drivers/xen/xlate_mmu.c
@@ -36,6 +36,7 @@
 #include <asm/xen/hypervisor.h>
 
 #include <xen/xen.h>
+#include <xen/xen-ops.h>
 #include <xen/page.h>
 #include <xen/interface/xen.h>
 #include <xen/interface/memory.h>
diff --git a/fs/afs/inode.c b/fs/afs/inode.c
index 479b7fd..071075d 100644
--- a/fs/afs/inode.c
+++ b/fs/afs/inode.c
@@ -379,7 +379,7 @@ void afs_zap_data(struct afs_vnode *vnode)
 int afs_validate(struct afs_vnode *vnode, struct key *key)
 {
 	time64_t now = ktime_get_real_seconds();
-	bool valid = false;
+	bool valid;
 	int ret;
 
 	_enter("{v={%x:%u} fl=%lx},%x",
@@ -399,15 +399,21 @@ int afs_validate(struct afs_vnode *vnode, struct key *key)
 			vnode->cb_v_break = vnode->volume->cb_v_break;
 			valid = false;
 		} else if (vnode->status.type == AFS_FTYPE_DIR &&
-			   test_bit(AFS_VNODE_DIR_VALID, &vnode->flags) &&
-			   vnode->cb_expires_at - 10 > now) {
-			valid = true;
-		} else if (!test_bit(AFS_VNODE_ZAP_DATA, &vnode->flags) &&
-			   vnode->cb_expires_at - 10 > now) {
+			   (!test_bit(AFS_VNODE_DIR_VALID, &vnode->flags) ||
+			    vnode->cb_expires_at - 10 <= now)) {
+			valid = false;
+		} else if (test_bit(AFS_VNODE_ZAP_DATA, &vnode->flags) ||
+			   vnode->cb_expires_at - 10 <= now) {
+			valid = false;
+		} else {
 			valid = true;
 		}
 	} else if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) {
 		valid = true;
+	} else {
+		vnode->cb_s_break = vnode->cb_interest->server->cb_s_break;
+		vnode->cb_v_break = vnode->volume->cb_v_break;
+		valid = false;
 	}
 
 	read_sequnlock_excl(&vnode->cb_lock);
diff --git a/fs/aio.c b/fs/aio.c
index b9350f3..04c4d62 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -1436,6 +1436,7 @@ static int aio_prep_rw(struct kiocb *req, struct iocb *iocb)
 		ret = ioprio_check_cap(iocb->aio_reqprio);
 		if (ret) {
 			pr_debug("aio ioprio check cap error: %d\n", ret);
+			fput(req->ki_filp);
 			return ret;
 		}
 
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index ba8950b..84cb6e5 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -3344,7 +3344,8 @@ static void free_pending_move(struct send_ctx *sctx, struct pending_dir_move *m)
 	kfree(m);
 }
 
-static void tail_append_pending_moves(struct pending_dir_move *moves,
+static void tail_append_pending_moves(struct send_ctx *sctx,
+				      struct pending_dir_move *moves,
 				      struct list_head *stack)
 {
 	if (list_empty(&moves->list)) {
@@ -3355,6 +3356,10 @@ static void tail_append_pending_moves(struct pending_dir_move *moves,
 		list_add_tail(&moves->list, stack);
 		list_splice_tail(&list, stack);
 	}
+	if (!RB_EMPTY_NODE(&moves->node)) {
+		rb_erase(&moves->node, &sctx->pending_dir_moves);
+		RB_CLEAR_NODE(&moves->node);
+	}
 }
 
 static int apply_children_dir_moves(struct send_ctx *sctx)
@@ -3369,7 +3374,7 @@ static int apply_children_dir_moves(struct send_ctx *sctx)
 		return 0;
 
 	INIT_LIST_HEAD(&stack);
-	tail_append_pending_moves(pm, &stack);
+	tail_append_pending_moves(sctx, pm, &stack);
 
 	while (!list_empty(&stack)) {
 		pm = list_first_entry(&stack, struct pending_dir_move, list);
@@ -3380,7 +3385,7 @@ static int apply_children_dir_moves(struct send_ctx *sctx)
 			goto out;
 		pm = get_pending_dir_moves(sctx, parent_ino);
 		if (pm)
-			tail_append_pending_moves(pm, &stack);
+			tail_append_pending_moves(sctx, pm, &stack);
 	}
 	return 0;
 
diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
index 95983c7..5ab411d 100644
--- a/fs/cachefiles/namei.c
+++ b/fs/cachefiles/namei.c
@@ -244,11 +244,13 @@ static int cachefiles_mark_object_active(struct cachefiles_cache *cache,
 
 	ASSERT(!test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags));
 
-	cache->cache.ops->put_object(&xobject->fscache, cachefiles_obj_put_wait_retry);
+	cache->cache.ops->put_object(&xobject->fscache,
+		(enum fscache_obj_ref_trace)cachefiles_obj_put_wait_retry);
 	goto try_again;
 
 requeue:
-	cache->cache.ops->put_object(&xobject->fscache, cachefiles_obj_put_wait_timeo);
+	cache->cache.ops->put_object(&xobject->fscache,
+		(enum fscache_obj_ref_trace)cachefiles_obj_put_wait_timeo);
 	_leave(" = -ETIMEDOUT");
 	return -ETIMEDOUT;
 }
diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
index 40f7595..8a57740 100644
--- a/fs/cachefiles/rdwr.c
+++ b/fs/cachefiles/rdwr.c
@@ -535,7 +535,10 @@ static int cachefiles_read_backing_file(struct cachefiles_object *object,
 					    netpage->index, cachefiles_gfp);
 		if (ret < 0) {
 			if (ret == -EEXIST) {
+				put_page(backpage);
+				backpage = NULL;
 				put_page(netpage);
+				netpage = NULL;
 				fscache_retrieval_complete(op, 1);
 				continue;
 			}
@@ -608,7 +611,10 @@ static int cachefiles_read_backing_file(struct cachefiles_object *object,
 					    netpage->index, cachefiles_gfp);
 		if (ret < 0) {
 			if (ret == -EEXIST) {
+				put_page(backpage);
+				backpage = NULL;
 				put_page(netpage);
+				netpage = NULL;
 				fscache_retrieval_complete(op, 1);
 				continue;
 			}
@@ -962,11 +968,8 @@ void cachefiles_uncache_page(struct fscache_object *_object, struct page *page)
 	__releases(&object->fscache.cookie->lock)
 {
 	struct cachefiles_object *object;
-	struct cachefiles_cache *cache;
 
 	object = container_of(_object, struct cachefiles_object, fscache);
-	cache = container_of(object->fscache.cache,
-			     struct cachefiles_cache, cache);
 
 	_enter("%p,{%lu}", object, page->index);
 
diff --git a/fs/cachefiles/xattr.c b/fs/cachefiles/xattr.c
index 0a29a00..511e6c6 100644
--- a/fs/cachefiles/xattr.c
+++ b/fs/cachefiles/xattr.c
@@ -135,7 +135,8 @@ int cachefiles_update_object_xattr(struct cachefiles_object *object,
 	struct dentry *dentry = object->dentry;
 	int ret;
 
-	ASSERT(dentry);
+	if (!dentry)
+		return -ESTALE;
 
 	_enter("%p,#%d", object, auxdata->len);
 
diff --git a/fs/dax.c b/fs/dax.c
index b0cd136..3a2682a6c 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -423,7 +423,7 @@ bool dax_lock_mapping_entry(struct page *page)
 	for (;;) {
 		mapping = READ_ONCE(page->mapping);
 
-		if (!dax_mapping(mapping))
+		if (!mapping || !dax_mapping(mapping))
 			break;
 
 		/*
diff --git a/fs/exportfs/expfs.c b/fs/exportfs/expfs.c
index 645158d..63707ab 100644
--- a/fs/exportfs/expfs.c
+++ b/fs/exportfs/expfs.c
@@ -77,7 +77,7 @@ static bool dentry_connected(struct dentry *dentry)
 		struct dentry *parent = dget_parent(dentry);
 
 		dput(dentry);
-		if (IS_ROOT(dentry)) {
+		if (dentry == parent) {
 			dput(parent);
 			return false;
 		}
diff --git a/fs/fscache/object.c b/fs/fscache/object.c
index 9edc920..6d9cb17 100644
--- a/fs/fscache/object.c
+++ b/fs/fscache/object.c
@@ -730,6 +730,9 @@ static const struct fscache_state *fscache_drop_object(struct fscache_object *ob
 
 	if (awaken)
 		wake_up_bit(&cookie->flags, FSCACHE_COOKIE_INVALIDATING);
+	if (test_and_clear_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags))
+		wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);
 
 	/* Prevent a race with our last child, which has to signal EV_CLEARED
 	 * before dropping our spinlock.
diff --git a/fs/hfs/btree.c b/fs/hfs/btree.c
index 374b568..9bdff5e 100644
--- a/fs/hfs/btree.c
+++ b/fs/hfs/btree.c
@@ -329,13 +329,14 @@ void hfs_bmap_free(struct hfs_bnode *node)
 
 		nidx -= len * 8;
 		i = node->next;
-		hfs_bnode_put(node);
 		if (!i) {
 			/* panic */;
 			pr_crit("unable to free bnode %u. bmap not found!\n",
 				node->this);
+			hfs_bnode_put(node);
 			return;
 		}
+		hfs_bnode_put(node);
 		node = hfs_bnode_find(tree, i);
 		if (IS_ERR(node))
 			return;
diff --git a/fs/hfsplus/btree.c b/fs/hfsplus/btree.c
index de14b2b..3de3bc4 100644
--- a/fs/hfsplus/btree.c
+++ b/fs/hfsplus/btree.c
@@ -454,14 +454,15 @@ void hfs_bmap_free(struct hfs_bnode *node)
 
 		nidx -= len * 8;
 		i = node->next;
-		hfs_bnode_put(node);
 		if (!i) {
 			/* panic */;
 			pr_crit("unable to free bnode %u. "
 					"bmap not found!\n",
 				node->this);
+			hfs_bnode_put(node);
 			return;
 		}
+		hfs_bnode_put(node);
 		node = hfs_bnode_find(tree, i);
 		if (IS_ERR(node))
 			return;
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
index 86ac2c5..e0fe9a0 100644
--- a/fs/nfs/flexfilelayout/flexfilelayout.c
+++ b/fs/nfs/flexfilelayout/flexfilelayout.c
@@ -1733,7 +1733,8 @@ ff_layout_read_pagelist(struct nfs_pgio_header *hdr)
 	if (fh)
 		hdr->args.fh = fh;
 
-	if (!nfs4_ff_layout_select_ds_stateid(lseg, idx, &hdr->args.stateid))
+	if (vers == 4 &&
+		!nfs4_ff_layout_select_ds_stateid(lseg, idx, &hdr->args.stateid))
 		goto out_failed;
 
 	/*
@@ -1798,7 +1799,8 @@ ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
 	if (fh)
 		hdr->args.fh = fh;
 
-	if (!nfs4_ff_layout_select_ds_stateid(lseg, idx, &hdr->args.stateid))
+	if (vers == 4 &&
+		!nfs4_ff_layout_select_ds_stateid(lseg, idx, &hdr->args.stateid))
 		goto out_failed;
 
 	/*
diff --git a/fs/ocfs2/export.c b/fs/ocfs2/export.c
index 9f88188..4bf8d58 100644
--- a/fs/ocfs2/export.c
+++ b/fs/ocfs2/export.c
@@ -125,10 +125,10 @@ static struct dentry *ocfs2_get_dentry(struct super_block *sb,
 
 check_gen:
 	if (handle->ih_generation != inode->i_generation) {
-		iput(inode);
 		trace_ocfs2_get_dentry_generation((unsigned long long)blkno,
 						  handle->ih_generation,
 						  inode->i_generation);
+		iput(inode);
 		result = ERR_PTR(-ESTALE);
 		goto bail;
 	}
diff --git a/fs/ocfs2/move_extents.c b/fs/ocfs2/move_extents.c
index 7eb3b0a..f55f82c 100644
--- a/fs/ocfs2/move_extents.c
+++ b/fs/ocfs2/move_extents.c
@@ -156,18 +156,14 @@ static int __ocfs2_move_extent(handle_t *handle,
 }
 
 /*
- * lock allocators, and reserving appropriate number of bits for
- * meta blocks and data clusters.
- *
- * in some cases, we don't need to reserve clusters, just let data_ac
- * be NULL.
+ * lock allocator, and reserve appropriate number of bits for
+ * meta blocks.
  */
-static int ocfs2_lock_allocators_move_extents(struct inode *inode,
+static int ocfs2_lock_meta_allocator_move_extents(struct inode *inode,
 					struct ocfs2_extent_tree *et,
 					u32 clusters_to_move,
 					u32 extents_to_split,
 					struct ocfs2_alloc_context **meta_ac,
-					struct ocfs2_alloc_context **data_ac,
 					int extra_blocks,
 					int *credits)
 {
@@ -192,13 +188,6 @@ static int ocfs2_lock_allocators_move_extents(struct inode *inode,
 		goto out;
 	}
 
-	if (data_ac) {
-		ret = ocfs2_reserve_clusters(osb, clusters_to_move, data_ac);
-		if (ret) {
-			mlog_errno(ret);
-			goto out;
-		}
-	}
 
 	*credits += ocfs2_calc_extend_credits(osb->sb, et->et_root_el);
 
@@ -257,10 +246,10 @@ static int ocfs2_defrag_extent(struct ocfs2_move_extents_context *context,
 		}
 	}
 
-	ret = ocfs2_lock_allocators_move_extents(inode, &context->et, *len, 1,
-						 &context->meta_ac,
-						 &context->data_ac,
-						 extra_blocks, &credits);
+	ret = ocfs2_lock_meta_allocator_move_extents(inode, &context->et,
+						*len, 1,
+						&context->meta_ac,
+						extra_blocks, &credits);
 	if (ret) {
 		mlog_errno(ret);
 		goto out;
@@ -283,6 +272,21 @@ static int ocfs2_defrag_extent(struct ocfs2_move_extents_context *context,
 		}
 	}
 
+	/*
+	 * Make sure ocfs2_reserve_clusters is called after
+	 * __ocfs2_flush_truncate_log, otherwise a deadlock on the
+	 * global bitmap may happen.
+	 */
+	ret = ocfs2_reserve_clusters(osb, *len, &context->data_ac);
+	if (ret) {
+		mlog_errno(ret);
+		goto out_unlock_mutex;
+	}
+
 	handle = ocfs2_start_trans(osb, credits);
 	if (IS_ERR(handle)) {
 		ret = PTR_ERR(handle);
@@ -600,9 +604,10 @@ static int ocfs2_move_extent(struct ocfs2_move_extents_context *context,
 		}
 	}
 
-	ret = ocfs2_lock_allocators_move_extents(inode, &context->et, len, 1,
-						 &context->meta_ac,
-						 NULL, extra_blocks, &credits);
+	ret = ocfs2_lock_meta_allocator_move_extents(inode, &context->et,
+						len, 1,
+						&context->meta_ac,
+						extra_blocks, &credits);
 	if (ret) {
 		mlog_errno(ret);
 		goto out;
diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c
index f4fd2e7..03cd593 100644
--- a/fs/pstore/ram.c
+++ b/fs/pstore/ram.c
@@ -806,17 +806,14 @@ static int ramoops_probe(struct platform_device *pdev)
 
 	cxt->pstore.data = cxt;
 	/*
-	 * Console can handle any buffer size, so prefer LOG_LINE_MAX. If we
-	 * have to handle dumps, we must have at least record_size buffer. And
-	 * for ftrace, bufsize is irrelevant (if bufsize is 0, buf will be
-	 * ZERO_SIZE_PTR).
+	 * Since bufsize is only used for dmesg crash dumps, it
+	 * must match the size of the dprz record (after PRZ header
+	 * and ECC bytes have been accounted for).
 	 */
-	if (cxt->console_size)
-		cxt->pstore.bufsize = 1024; /* LOG_LINE_MAX */
-	cxt->pstore.bufsize = max(cxt->record_size, cxt->pstore.bufsize);
-	cxt->pstore.buf = kmalloc(cxt->pstore.bufsize, GFP_KERNEL);
+	cxt->pstore.bufsize = cxt->dprzs[0]->buffer_size;
+	cxt->pstore.buf = kzalloc(cxt->pstore.bufsize, GFP_KERNEL);
 	if (!cxt->pstore.buf) {
-		pr_err("cannot allocate pstore buffer\n");
+		pr_err("cannot allocate pstore crash dump buffer\n");
 		err = -ENOMEM;
 		goto fail_clear;
 	}
diff --git a/fs/sysv/inode.c b/fs/sysv/inode.c
index 499a20a5..273736f 100644
--- a/fs/sysv/inode.c
+++ b/fs/sysv/inode.c
@@ -275,7 +275,7 @@ static int __sysv_write_inode(struct inode *inode, int wait)
                 }
         }
 	brelse(bh);
-	return 0;
+	return err;
 }
 
 int sysv_write_inode(struct inode *inode, struct writeback_control *wbc)
diff --git a/include/linux/bluetooth-power.h b/include/linux/bluetooth-power.h
index 723584a..108309f 100644
--- a/include/linux/bluetooth-power.h
+++ b/include/linux/bluetooth-power.h
@@ -61,6 +61,12 @@ struct bluetooth_power_platform_data {
 	struct bt_power_vreg_data *bt_vdd_xtal;
 	/* VDD_CORE voltage regulator */
 	struct bt_power_vreg_data *bt_vdd_core;
+	/* VDD_DIG digital voltage regulator */
+	struct bt_power_vreg_data *bt_vdd_dig;
+	/* VDD RFA1 digital voltage regulator */
+	struct bt_power_vreg_data *bt_vdd_rfa1;
+	/* VDD RFA2 digital voltage regulator */
+	struct bt_power_vreg_data *bt_vdd_rfa2;
 	/* Optional: chip power down gpio-regulator
 	 * chip power down data is required when bluetooth module
 	 * and other modules like wifi co-exist in a single chip and
diff --git a/include/linux/cpu_cooling.h b/include/linux/cpu_cooling.h
index c6fade2..e9f8661 100644
--- a/include/linux/cpu_cooling.h
+++ b/include/linux/cpu_cooling.h
@@ -30,6 +30,12 @@
 
 struct cpufreq_policy;
 
+typedef int (*plat_mitig_t)(int cpu, u32 clip_freq);
+
+struct cpu_cooling_ops {
+	plat_mitig_t ceil_limit, floor_limit;
+};
+
 #ifdef CONFIG_CPU_THERMAL
 /**
  * cpufreq_cooling_register - function to create cpufreq cooling device.
@@ -38,6 +44,10 @@ struct cpufreq_policy;
 struct thermal_cooling_device *
 cpufreq_cooling_register(struct cpufreq_policy *policy);
 
+struct thermal_cooling_device *
+cpufreq_platform_cooling_register(struct cpufreq_policy *policy,
+					struct cpu_cooling_ops *ops);
+
 /**
  * cpufreq_cooling_unregister - function to remove cpufreq cooling device.
  * @cdev: thermal cooling device pointer.
@@ -74,6 +84,13 @@ of_cpufreq_cooling_register(struct cpufreq_policy *policy)
 {
 	return NULL;
 }
+
+static inline struct thermal_cooling_device *
+cpufreq_platform_cooling_register(struct cpufreq_policy *policy,
+					struct cpu_cooling_ops *ops)
+{
+	return NULL;
+}
 #endif /* defined(CONFIG_THERMAL_OF) && defined(CONFIG_CPU_THERMAL) */
 
 #endif /* __CPU_COOLING_H__ */
diff --git a/include/linux/diagchar.h b/include/linux/diagchar.h
index c5700cf..50b6a02 100644
--- a/include/linux/diagchar.h
+++ b/include/linux/diagchar.h
@@ -59,6 +59,7 @@
 #define DIAG_IOCTL_HDLC_TOGGLE	38
 #define DIAG_IOCTL_QUERY_PD_LOGGING	39
 #define DIAG_IOCTL_QUERY_CON_ALL	40
+#define DIAG_IOCTL_QUERY_MD_PID	41
 
 /* PC Tools IDs */
 #define APQ8060_TOOLS_ID	4062
@@ -139,7 +140,7 @@
  * a new RANGE of SSIDs to the msg_mask_tbl.
  */
 #define MSG_MASK_TBL_CNT		26
-#define APPS_EVENT_LAST_ID		0xC85
+#define APPS_EVENT_LAST_ID		0xC92
 
 #define MSG_SSID_0			0
 #define MSG_SSID_0_LAST			129
@@ -174,7 +175,7 @@
 #define MSG_SSID_15			8000
 #define MSG_SSID_15_LAST		8000
 #define MSG_SSID_16			8500
-#define MSG_SSID_16_LAST		8529
+#define MSG_SSID_16_LAST		8531
 #define MSG_SSID_17			9000
 #define MSG_SSID_17_LAST		9008
 #define MSG_SSID_18			9500
@@ -770,7 +771,9 @@ static const uint32_t msg_bld_masks_16[] = {
 	MSG_LVL_LOW | MSG_LVL_MED | MSG_LVL_HIGH | MSG_LVL_ERROR |
 		MSG_LVL_FATAL,
 	MSG_LVL_LOW | MSG_LVL_MED | MSG_LVL_HIGH | MSG_LVL_ERROR |
-		MSG_LVL_FATAL
+		MSG_LVL_FATAL,
+	MSG_LVL_MED,
+	MSG_LVL_MED
 };
 
 static const uint32_t msg_bld_masks_17[] =  {
diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
index 34cf0fd..610815e 100644
--- a/include/linux/fscache-cache.h
+++ b/include/linux/fscache-cache.h
@@ -196,8 +196,7 @@ static inline void fscache_enqueue_retrieval(struct fscache_retrieval *op)
 static inline void fscache_retrieval_complete(struct fscache_retrieval *op,
 					      int n_pages)
 {
-	atomic_sub(n_pages, &op->n_pages);
-	if (atomic_read(&op->n_pages) <= 0)
+	if (atomic_sub_return_relaxed(n_pages, &op->n_pages) <= 0)
 		fscache_op_complete(&op->op, false);
 }
 
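The two-step sub-then-read sequence being removed could complete the
operation twice: with n_pages == 2 and two CPUs each finishing one page,
both decrements can land before either read, so both CPUs observe zero.
A worked trace of the race, and why the single primitive fixes it:

	/* Old code, op->n_pages == 2:
	 *   CPU A: atomic_sub(1, &op->n_pages);     // 2 -> 1
	 *   CPU B: atomic_sub(1, &op->n_pages);     // 1 -> 0
	 *   CPU A: atomic_read(&op->n_pages) == 0   // completes the op
	 *   CPU B: atomic_read(&op->n_pages) == 0   // completes it again
	 *
	 * New code: atomic_sub_return_relaxed() gives CPU A the value 1
	 * and CPU B the value 0, so exactly one caller observes the
	 * transition and calls fscache_op_complete().
	 */
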
diff --git a/include/linux/ipa_wigig.h b/include/linux/ipa_wigig.h
new file mode 100644
index 0000000..85fa4c4
--- /dev/null
+++ b/include/linux/ipa_wigig.h
@@ -0,0 +1,484 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _IPA_WIGIG_H_
+#define _IPA_WIGIG_H_
+
+#include <linux/msm_ipa.h>
+#include <linux/ipa.h>
+
+typedef void (*ipa_wigig_misc_int_cb)(void *priv);
+
+/*
+ * struct ipa_wigig_init_in_params - wigig init input parameters
+ *
+ * @periph_baddr_pa: physical address of wigig HW base
+ * @pseudo_cause_pa: physical address of wigig HW pseudo_cause register
+ * @int_gen_tx_pa: physical address of wigig HW int_gen_tx register
+ * @int_gen_rx_pa: physical address of wigig HW int_gen_rx register
+ * @dma_ep_misc_pa: physical address of wigig HW dma_ep_misc register
+ * @notify: uc ready callback
+ * @int_notify: wigig misc interrupt callback
+ * @priv: uc ready callback cookie
+ */
+struct ipa_wigig_init_in_params {
+	phys_addr_t periph_baddr_pa;
+	phys_addr_t pseudo_cause_pa;
+	phys_addr_t int_gen_tx_pa;
+	phys_addr_t int_gen_rx_pa;
+	phys_addr_t dma_ep_misc_pa;
+	ipa_uc_ready_cb notify;
+	ipa_wigig_misc_int_cb int_notify;
+	void *priv;
+};
+
+/*
+ * struct ipa_wigig_init_out_params - wigig init output parameters
+ *
+ * @is_uc_ready: is uC ready. No API should be called until uC is ready.
+ * @uc_db_pa: physical address of IPA uC doorbell
+ */
+struct ipa_wigig_init_out_params {
+	bool is_uc_ready;
+	phys_addr_t uc_db_pa;
+};
+
+/*
+ * struct ipa_wigig_hdr_info - Header to install on IPA HW
+ *
+ * @hdr: header to install on IPA HW
+ * @hdr_len: length of header
+ * @dst_mac_addr_offset: destination mac address offset
+ * @hdr_type: layer two header type
+ */
+struct ipa_wigig_hdr_info {
+	u8 *hdr;
+	u8 hdr_len;
+	u8 dst_mac_addr_offset;
+	enum ipa_hdr_l2_type hdr_type;
+};
+
+/*
+ * struct ipa_wigig_reg_intf_in_params - parameters for offload interface
+ *	registration
+ *
+ * @netdev_name: network interface name
+ * @netdev_mac: netdev mac address
+ * @hdr_info: header information
+ */
+struct ipa_wigig_reg_intf_in_params {
+	const char *netdev_name;
+	u8 netdev_mac[IPA_MAC_ADDR_SIZE];
+	struct ipa_wigig_hdr_info hdr_info[IPA_IP_MAX];
+};
+
+/*
+ * struct ipa_wigig_pipe_setup_info - WIGIG TX/Rx configuration
+ * @desc_ring_base_pa: physical address of the base of the descriptor ring
+ * @desc_ring_size: size of the descriptor ring
+ * @desc_ring_HWHEAD_pa: physical address of the wigig descriptor ring HWHEAD
+ * @desc_ring_HWTAIL_pa: physical address of the wigig descriptor ring HWTAIL
+ * @status_ring_base_pa: physical address of the base of the status ring
+ * @status_ring_size: status ring size
+ * @status_ring_HWHEAD_pa: physical address of the wigig status ring HWHEAD
+ * @status_ring_HWTAIL_pa: physical address of the wigig status ring HWTAIL
+ */
+struct ipa_wigig_pipe_setup_info {
+	phys_addr_t desc_ring_base_pa;
+	u32 desc_ring_size;
+	phys_addr_t desc_ring_HWHEAD_pa;
+	phys_addr_t desc_ring_HWTAIL_pa;
+
+	phys_addr_t status_ring_base_pa;
+	u32 status_ring_size;
+	phys_addr_t status_ring_HWHEAD_pa;
+	phys_addr_t status_ring_HWTAIL_pa;
+};
+
+/*
+ * struct ipa_wigig_pipe_setup_info_smmu - WIGIG TX/Rx configuration smmu mode
+ * @desc_ring_base: sg_table of the base of the descriptor ring
+ * @desc_ring_base_iova: IO virtual address mapped to physical base address
+ * @desc_ring_size: size of the descriptor ring
+ * @desc_ring_HWHEAD_pa: physical address of the wigig descriptor ring HWHEAD
+ * @desc_ring_HWTAIL_pa: physical address of the wigig descriptor ring HWTAIL
+ * @status_ring_base: sg_table of the base of the status ring
+ * @status_ring_base_iova: IO virtual address mapped to physical base address
+ * @status_ring_size: status ring size
+ * @status_ring_HWHEAD_pa: physical address of the wigig status ring HWHEAD
+ * @status_ring_HWTAIL_pa: physical address of the wigig status ring HWTAIL
+ */
+struct ipa_wigig_pipe_setup_info_smmu {
+	struct sg_table desc_ring_base;
+	u64 desc_ring_base_iova;
+	u32 desc_ring_size;
+	phys_addr_t desc_ring_HWHEAD_pa;
+	phys_addr_t desc_ring_HWTAIL_pa;
+
+	struct sg_table status_ring_base;
+	u64 status_ring_base_iova;
+	u32 status_ring_size;
+	phys_addr_t status_ring_HWHEAD_pa;
+	phys_addr_t status_ring_HWTAIL_pa;
+};
+
+/*
+ * struct ipa_wigig_rx_pipe_data_buffer_info - WIGIG Rx data buffer
+ *	configuration
+ * @data_buffer_base_pa: physical address of the physically contiguous
+ *			Rx data buffer
+ * @data_buffer_size: size of the data buffer
+ */
+struct ipa_wigig_rx_pipe_data_buffer_info {
+	phys_addr_t data_buffer_base_pa;
+	u32 data_buffer_size;
+};
+
+/*
+ * struct ipa_wigig_rx_pipe_data_buffer_info_smmu - WIGIG Rx data buffer
+ *	configuration smmu mode
+ * @data_buffer_base: sg_table of the physically contiguous
+ *			Rx data buffer
+ * @data_buffer_base_iova: IO virtual address mapped to physical base address
+ * @data_buffer_size: size of the data buffer
+ */
+struct ipa_wigig_rx_pipe_data_buffer_info_smmu {
+	struct sg_table data_buffer_base;
+	u64 data_buffer_base_iova;
+	u32 data_buffer_size;
+};
+
+/*
+ * struct ipa_wigig_conn_rx_in_params - information provided by
+ *				WIGIG offload client for Rx pipe
+ * @notify: client callback function
+ * @priv: client cookie
+ * @pipe: parameters to connect Rx pipe (WIGIG to IPA)
+ * @dbuff: Rx data buffer info
+ */
+struct ipa_wigig_conn_rx_in_params {
+	ipa_notify_cb notify;
+	void *priv;
+	struct ipa_wigig_pipe_setup_info pipe;
+	struct ipa_wigig_rx_pipe_data_buffer_info dbuff;
+};
+
+/*
+ * struct ipa_wigig_conn_rx_in_params_smmu - information provided by
+ *				WIGIG offload client for Rx pipe
+ * @notify: client callback function
+ * @priv: client cookie
+ * @pipe_smmu: parameters to connect Rx pipe (WIGIG to IPA) smmu mode
+ * @dbuff_smmu: Rx data buffer info smmu mode
+ */
+struct ipa_wigig_conn_rx_in_params_smmu {
+	ipa_notify_cb notify;
+	void *priv;
+	struct ipa_wigig_pipe_setup_info_smmu pipe_smmu;
+	struct ipa_wigig_rx_pipe_data_buffer_info_smmu dbuff_smmu;
+};
+
+/*
+ * struct ipa_wigig_conn_out_params - information provided
+ *				to WIGIG driver
+ * @client: client type allocated by IPA driver
+ */
+struct ipa_wigig_conn_out_params {
+	enum ipa_client_type client;
+};
+
+/*
+ * struct ipa_wigig_tx_pipe_data_buffer_info - WIGIG Tx data buffer
+ *	configuration
+ * @data_buffer_size: size of a single data buffer
+ */
+struct ipa_wigig_tx_pipe_data_buffer_info {
+	u32 data_buffer_size;
+};
+
+/*
+ * struct ipa_wigig_tx_pipe_data_buffer_info_smmu - WIGIG Tx data buffer
+ *				configuration smmu mode
+ * @data_buffer_base: sg_tables of the Tx data buffers
+ * @data_buffer_base_iova: IO virtual address mapped to physical base address
+ * @num_buffers: number of buffers
+ * @data_buffer_size: size of a single data buffer
+ */
+struct ipa_wigig_tx_pipe_data_buffer_info_smmu {
+	struct sg_table *data_buffer_base;
+	u64 *data_buffer_base_iova;
+	u32 num_buffers;
+	u32 data_buffer_size;
+};
+
+/*
+ * struct ipa_wigig_conn_tx_in_params - information provided by
+ *		wigig offload client for Tx pipe
+ * @pipe: parameters to connect Tx pipe (IPA to WIGIG)
+ * @dbuff: Tx data buffer info
+ * @int_gen_tx_bit_num: bit in int_gen_tx register associated with this client
+ * @client_mac: MAC address of client to be connected
+ */
+struct ipa_wigig_conn_tx_in_params {
+	struct ipa_wigig_pipe_setup_info pipe;
+	struct ipa_wigig_tx_pipe_data_buffer_info dbuff;
+	u8 int_gen_tx_bit_num;
+	u8 client_mac[IPA_MAC_ADDR_SIZE];
+};
+
+/*
+ * struct ipa_wigig_conn_tx_in_params_smmu - information provided by
+ *		wigig offload client for Tx pipe
+ * @pipe_smmu: parameters to connect Tx pipe (IPA to WIGIG) smmu mode
+ * @dbuff_smmu: Tx data buffer info smmu mode
+ * @int_gen_tx_bit_num: bit in int_gen_tx register associated with this client
+ * @client_mac: MAC address of client to be connected
+ */
+struct ipa_wigig_conn_tx_in_params_smmu {
+	struct ipa_wigig_pipe_setup_info_smmu pipe_smmu;
+	struct ipa_wigig_tx_pipe_data_buffer_info_smmu dbuff_smmu;
+	u8 int_gen_tx_bit_num;
+	u8 client_mac[IPA_MAC_ADDR_SIZE];
+};
+
+#if defined CONFIG_IPA || defined CONFIG_IPA3
+
+/*
+ * ipa_wigig_init - Client should call this function to
+ * init WIGIG IPA offload data path
+ *
+ * Note: Should not be called from atomic context
+ *
+ * @Return 0 on success, negative on failure
+ */
+int ipa_wigig_init(struct ipa_wigig_init_in_params *in,
+	struct ipa_wigig_init_out_params *out);
+
+/*
+ * ipa_wigig_cleanup - Client should call this function to
+ * clean up WIGIG IPA offload data path
+ *
+ * @Return 0 on success, negative on failure
+ */
+int ipa_wigig_cleanup(void);
+
+/*
+ * ipa_wigig_is_smmu_enabled - get smmu state
+ *
+ * @Return true if smmu is enabled, false if disabled
+ */
+bool ipa_wigig_is_smmu_enabled(void);
+
+/*
+ * ipa_wigig_reg_intf - Client should call this function to
+ * register interface
+ *
+ * Note: Should not be called from atomic context
+ *
+ * @Return 0 on success, negative on failure
+ */
+int ipa_wigig_reg_intf(struct ipa_wigig_reg_intf_in_params *in);
+
+/*
+ * ipa_wigig_dereg_intf - Client Driver should call this
+ * function to deregister before unload and after disconnect
+ *
+ * @Return 0 on success, negative on failure
+ */
+int ipa_wigig_dereg_intf(const char *netdev_name);
+
+/*
+ * ipa_wigig_conn_rx_pipe - Client should call this
+ * function to connect the rx (UL) pipe
+ *
+ * @in: [in] input parameters from client
+ * @out: [out] output params to client
+ *
+ * Note: Non SMMU mode only; should not be called from atomic context
+ *
+ * @Return 0 on success, negative on failure
+ */
+int ipa_wigig_conn_rx_pipe(struct ipa_wigig_conn_rx_in_params *in,
+	struct ipa_wigig_conn_out_params *out);
+
+/*
+ * ipa_wigig_conn_rx_pipe_smmu - Client should call this
+ * function to connect the rx (UL) pipe
+ *
+ * @in: [in] input parameters from client
+ * @out: [out] output params to client
+ *
+ * Note: SMMU mode only; should not be called from atomic context
+ *
+ * @Return 0 on success, negative on failure
+ */
+int ipa_wigig_conn_rx_pipe_smmu(struct ipa_wigig_conn_rx_in_params_smmu *in,
+	struct ipa_wigig_conn_out_params *out);
+
+/*
+ * ipa_wigig_conn_client - Client should call this
+ * function to connect one of the tx (DL) pipes when a WIGIG client connects
+ *
+ * @in: [in] input parameters from client
+ * @out: [out] output params to client
+ *
+ * Note: Non SMMU mode only; should not be called from atomic context
+ *
+ * @Return 0 on success, negative on failure
+ */
+int ipa_wigig_conn_client(struct ipa_wigig_conn_tx_in_params *in,
+	struct ipa_wigig_conn_out_params *out);
+
+/*
+ * ipa_wigig_conn_client_smmu - Client should call this
+ * function to connect one of the tx (DL) pipes when a WIGIG client connects
+ *
+ * @in: [in] input parameters from client
+ * @out: [out] output params to client
+ *
+ * Note: SMMU mode only; should not be called from atomic context
+ *
+ * @Return 0 on success, negative on failure
+ */
+int ipa_wigig_conn_client_smmu(struct ipa_wigig_conn_tx_in_params_smmu *in,
+	struct ipa_wigig_conn_out_params *out);
+
+/*
+ * ipa_wigig_disconn_pipe() - Client should call this
+ *		function to disconnect a pipe
+ *
+ * @client: [in] pipe to be disconnected
+ *
+ * Note: Should not be called from atomic context
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_wigig_disconn_pipe(enum ipa_client_type client);
+
+/*
+ * ipa_wigig_enable_pipe() - Client should call this
+ *		function to enable IPA offload data path
+ *
+ * @client: [in] pipe to be enabled
+ * Note: Should not be called from atomic context
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_wigig_enable_pipe(enum ipa_client_type client);
+
+/*
+ * ipa_wigig_disable_pipe() - Client should call this
+ *		function to disable IPA offload data path
+ *
+ * @client: [in] pipe to be disabled
+ * Note: Should not be called from atomic context
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_wigig_disable_pipe(enum ipa_client_type client);
+
+/*
+ * ipa_wigig_tx_dp() - transmit tx packet through IPA to 11ad HW
+ *
+ * @dst: [in] destination ipa client pipe to be used
+ * @skb: [in] skb to be transmitted
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_wigig_tx_dp(enum ipa_client_type dst, struct sk_buff *skb);
+
+/**
+ * ipa_wigig_set_perf_profile() - Client should call this function to
+ *		set IPA clock bandwidth based on data rates
+ *
+ * @max_supported_bw_mbps: [in] maximum bandwidth needed (in Mbps)
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_wigig_set_perf_profile(u32 max_supported_bw_mbps);
+
+#else /* (CONFIG_IPA || CONFIG_IPA3) */
+static inline int ipa_wigig_init(struct ipa_wigig_init_in_params *in,
+	struct ipa_wigig_init_out_params *out)
+{
+	return -EPERM;
+}
+
+static inline int ipa_wigig_cleanup(void)
+{
+	return -EPERM;
+}
+
+static inline bool ipa_wigig_is_smmu_enabled(void)
+{
+	return false;
+}
+
+static inline int ipa_wigig_reg_intf(struct ipa_wigig_reg_intf_in_params *in)
+{
+	return -EPERM;
+}
+
+static inline int ipa_wigig_dereg_intf(const char *netdev_name)
+{
+	return -EPERM;
+}
+
+static inline int ipa_wigig_conn_rx_pipe(
+	struct ipa_wigig_conn_rx_in_params *in,
+	struct ipa_wigig_conn_out_params *out)
+{
+	return -EPERM;
+}
+
+static inline int ipa_wigig_conn_rx_pipe_smmu(
+	struct ipa_wigig_conn_rx_in_params_smmu *in,
+	struct ipa_wigig_conn_out_params *out)
+{
+	return -EPERM;
+}
+
+static inline int ipa_wigig_conn_client(
+	struct ipa_wigig_conn_tx_in_params *in,
+	struct ipa_wigig_conn_out_params *out)
+{
+	return -EPERM;
+}
+
+static inline int ipa_wigig_conn_client_smmu(
+	struct ipa_wigig_conn_tx_in_params_smmu *in,
+	struct ipa_wigig_conn_out_params *out)
+{
+	return -EPERM;
+}
+
+static inline int ipa_wigig_disconn_pipe(enum ipa_client_type client)
+{
+	return -EPERM;
+}
+
+static inline int ipa_wigig_enable_pipe(enum ipa_client_type client)
+{
+	return -EPERM;
+}
+
+static inline int ipa_wigig_disable_pipe(enum ipa_client_type client)
+{
+	return -EPERM;
+}
+
+static inline int ipa_wigig_tx_dp(enum ipa_client_type dst,
+	struct sk_buff *skb)
+{
+	return -EPERM;
+}
+
+static inline int ipa_wigig_set_perf_profile(u32 max_supported_bw_mbps)
+{
+	return -EPERM;
+}
+#endif /* CONFIG_IPA || CONFIG_IPA3 */
+#endif /* _IPA_WIGIG_H_ */
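
The kernel-doc above implies a fixed bring-up order for offload clients;
a condensed sketch (parameter filling and error handling elided; this is
an inference from the comments, not taken from a client driver):

	/*
	 *	ipa_wigig_init(&init_in, &init_out);    // wait until uC ready
	 *	ipa_wigig_reg_intf(&intf_in);           // once per netdev
	 *	ipa_wigig_conn_rx_pipe(&rx_in, &out);   // single UL pipe
	 *	ipa_wigig_conn_client(&tx_in, &out);    // per connected peer
	 *	ipa_wigig_enable_pipe(out.client);      // start data path
	 *
	 * Teardown reverses the order: disable_pipe, disconn_pipe,
	 * dereg_intf, then cleanup. The *_smmu variants replace the
	 * connect calls when ipa_wigig_is_smmu_enabled() returns true.
	 */
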
diff --git a/include/linux/msm_gsi.h b/include/linux/msm_gsi.h
index 56ad38f..8976c0a 100644
--- a/include/linux/msm_gsi.h
+++ b/include/linux/msm_gsi.h
@@ -715,7 +715,7 @@ struct __packed gsi_11ad_rx_channel_scratch {
 	uint32_t status_ring_hwtail_address_msb;
 	uint32_t data_buffers_base_address_lsb;
 	uint32_t data_buffers_base_address_msb:8;
-	uint32_t fixed_data_buffer_size:16;
+	uint32_t fixed_data_buffer_size_pow_2:16;
 	uint32_t resv1:8;
 };
 
@@ -729,7 +729,7 @@ struct __packed gsi_11ad_rx_channel_scratch {
  *	updating descriptor ring 11ad HWTAIL pointer moderation.
  * @resv1: reserved bits.
  * @resv2: reserved bit.
- * @fixed_data_buffer_size: the fixed buffer size (> MTU).
+ * @fixed_data_buffer_size_pow_2: the fixed buffer size power of 2 (> MTU).
  * @resv3: reserved bits.
  */
 struct __packed gsi_11ad_tx_channel_scratch {
@@ -738,7 +738,7 @@ struct __packed gsi_11ad_tx_channel_scratch {
 	uint32_t update_status_hwtail_mod_threshold:8;
 	uint32_t resv1:24;
 	uint32_t resv2:8;
-	uint32_t fixed_data_buffer_size:16;
+	uint32_t fixed_data_buffer_size_pow_2:16;
 	uint32_t resv3:8;
 };
 
@@ -839,7 +839,7 @@ union __packed gsi_evt_scratch {
 	struct __packed gsi_mhi_evt_scratch mhi;
 	struct __packed gsi_xdci_evt_scratch xdci;
 	struct __packed gsi_wdi_evt_scratch wdi;
-	struct __packed gsi_11ad_evt_scratch ad11;
+	struct __packed gsi_11ad_evt_scratch w11ad;
 	struct __packed {
 		uint32_t word1;
 		uint32_t word2;
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index 8210bab..6734779 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -313,6 +313,7 @@ enum power_supply_property {
 	POWER_SUPPLY_PROP_FCC_STEPPER_ENABLE,
 	POWER_SUPPLY_PROP_TOGGLE_STAT,
 	POWER_SUPPLY_PROP_MAIN_FCC_MAX,
+	POWER_SUPPLY_PROP_FG_RESET,
 	/* Charge pump properties */
 	POWER_SUPPLY_PROP_CP_STATUS1,
 	POWER_SUPPLY_PROP_CP_STATUS2,
diff --git a/include/linux/pstore.h b/include/linux/pstore.h
index a15bc4d..30fcec3 100644
--- a/include/linux/pstore.h
+++ b/include/linux/pstore.h
@@ -90,7 +90,10 @@ struct pstore_record {
  *
  * @buf_lock:	spinlock to serialize access to @buf
  * @buf:	preallocated crash dump buffer
- * @bufsize:	size of @buf available for crash dump writes
+ * @bufsize:	size of @buf available for crash dump bytes (must match
+ *		smallest number of bytes available for writing to a
+ *		backend entry, since compressed bytes don't take kindly
+ *		to being truncated)
  *
  * @read_mutex:	serializes @open, @read, @close, and @erase callbacks
  * @flags:	bitfield of frontends the backend can accept writes for
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index e6ef9cc..60a2e76 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1355,6 +1355,17 @@ static inline void skb_zcopy_abort(struct sk_buff *skb)
 	}
 }
 
+static inline void skb_mark_not_on_list(struct sk_buff *skb)
+{
+	skb->next = NULL;
+}
+
+static inline void skb_list_del_init(struct sk_buff *skb)
+{
+	__list_del_entry(&skb->list);
+	skb_mark_not_on_list(skb);
+}
+
 /**
  *	skb_queue_empty - check if a queue is empty
  *	@list: queue head
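
A short sketch of how the two new helpers pair up when walking skbs
linked through skb->list (the list head name and consumer here are
illustrative):

	struct sk_buff *skb, *next;

	list_for_each_entry_safe(skb, next, &sublist, list) {
		/* unlink and clear skb->next, which legacy code still
		 * tests as a "not on a list" marker
		 */
		skb_list_del_init(skb);
		netif_receive_skb(skb);
	}
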
diff --git a/include/linux/spi/spi-geni-qcom.h b/include/linux/spi/spi-geni-qcom.h
new file mode 100644
index 0000000..f305cb0
--- /dev/null
+++ b/include/linux/spi/spi-geni-qcom.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __SPI_GENI_QCOM_HEADER___
+#define __SPI_GENI_QCOM_HEADER___
+
+struct spi_geni_qcom_ctrl_data {
+	u32 spi_cs_clk_delay;
+	u32 spi_inter_words_delay;
+};
+
+#endif /*__SPI_GENI_QCOM_HEADER___*/
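
A sketch of how a slave driver might hand these per-device delays to the
GENI SPI master, assuming the master reads spi->controller_data as
QUP-style controllers conventionally do (values are illustrative):

	static struct spi_geni_qcom_ctrl_data my_delays = {
		.spi_cs_clk_delay = 2,		/* CS-to-clock delay */
		.spi_inter_words_delay = 0,	/* no inter-word gap */
	};

	static int my_dev_probe(struct spi_device *spi)
	{
		spi->controller_data = &my_delays;
		return spi_setup(spi);
	}
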
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index 6c1eecd..beeeed1 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -453,6 +453,7 @@ static inline int neigh_hh_bridge(struct hh_cache *hh, struct sk_buff *skb)
 
 static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb)
 {
+	unsigned int hh_alen = 0;
 	unsigned int seq;
 	unsigned int hh_len;
 
@@ -460,16 +461,33 @@ static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb
 		seq = read_seqbegin(&hh->hh_lock);
 		hh_len = hh->hh_len;
 		if (likely(hh_len <= HH_DATA_MOD)) {
-			/* this is inlined by gcc */
-			memcpy(skb->data - HH_DATA_MOD, hh->hh_data, HH_DATA_MOD);
-		} else {
-			unsigned int hh_alen = HH_DATA_ALIGN(hh_len);
+			hh_alen = HH_DATA_MOD;
 
-			memcpy(skb->data - hh_alen, hh->hh_data, hh_alen);
+			/* skb_push() would proceed silently if we have room for
+			 * the unaligned size but not for the aligned size:
+			 * check headroom explicitly.
+			 */
+			if (likely(skb_headroom(skb) >= HH_DATA_MOD)) {
+				/* this is inlined by gcc */
+				memcpy(skb->data - HH_DATA_MOD, hh->hh_data,
+				       HH_DATA_MOD);
+			}
+		} else {
+			hh_alen = HH_DATA_ALIGN(hh_len);
+
+			if (likely(skb_headroom(skb) >= hh_alen)) {
+				memcpy(skb->data - hh_alen, hh->hh_data,
+				       hh_alen);
+			}
 		}
 	} while (read_seqretry(&hh->hh_lock, seq));
 
-	skb_push(skb, hh_len);
+	if (WARN_ON_ONCE(skb_headroom(skb) < hh_alen)) {
+		kfree_skb(skb);
+		return NET_XMIT_DROP;
+	}
+
+	__skb_push(skb, hh_len);
 	return dev_queue_xmit(skb);
 }
 
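A worked example of the gap this hunk closes: for Ethernet, hh_len is 14
but the cached header is copied in aligned units, so HH_DATA_MOD (16)
bytes of headroom are consumed even though only 14 are logically pushed.
A caller reserving exactly 14 bytes previously had the memcpy() scribble
on the two bytes before skb->head; now the copy is skipped and the skb
is dropped with NET_XMIT_DROP instead of corrupting memory.
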
diff --git a/include/net/netfilter/ipv4/nf_nat_masquerade.h b/include/net/netfilter/ipv4/nf_nat_masquerade.h
index cd24be4..13d55206b 100644
--- a/include/net/netfilter/ipv4/nf_nat_masquerade.h
+++ b/include/net/netfilter/ipv4/nf_nat_masquerade.h
@@ -9,7 +9,7 @@ nf_nat_masquerade_ipv4(struct sk_buff *skb, unsigned int hooknum,
 		       const struct nf_nat_range2 *range,
 		       const struct net_device *out);
 
-void nf_nat_masquerade_ipv4_register_notifier(void);
+int nf_nat_masquerade_ipv4_register_notifier(void);
 void nf_nat_masquerade_ipv4_unregister_notifier(void);
 
 #endif /*_NF_NAT_MASQUERADE_IPV4_H_ */
diff --git a/include/net/netfilter/ipv6/nf_nat_masquerade.h b/include/net/netfilter/ipv6/nf_nat_masquerade.h
index 0c3b5eb..2917bf9 100644
--- a/include/net/netfilter/ipv6/nf_nat_masquerade.h
+++ b/include/net/netfilter/ipv6/nf_nat_masquerade.h
@@ -5,7 +5,7 @@
 unsigned int
 nf_nat_masquerade_ipv6(struct sk_buff *skb, const struct nf_nat_range2 *range,
 		       const struct net_device *out);
-void nf_nat_masquerade_ipv6_register_notifier(void);
+int nf_nat_masquerade_ipv6_register_notifier(void);
 void nf_nat_masquerade_ipv6_unregister_notifier(void);
 
 #endif /* _NF_NAT_MASQUERADE_IPV6_H_ */
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index a11f937..feada35 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -2075,6 +2075,8 @@ struct sctp_association {
 
 	__u64 abandoned_unsent[SCTP_PR_INDEX(MAX) + 1];
 	__u64 abandoned_sent[SCTP_PR_INDEX(MAX) + 1];
+
+	struct rcu_head rcu;
 };
 
 
diff --git a/include/xen/balloon.h b/include/xen/balloon.h
index 61f410f..4914b93 100644
--- a/include/xen/balloon.h
+++ b/include/xen/balloon.h
@@ -44,8 +44,3 @@ static inline void xen_balloon_init(void)
 {
 }
 #endif
-
-#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
-struct resource;
-void arch_xen_balloon_init(struct resource *hostmem_resource);
-#endif
diff --git a/init/initramfs.c b/init/initramfs.c
index c18b91d..838f930 100644
--- a/init/initramfs.c
+++ b/init/initramfs.c
@@ -292,16 +292,6 @@ static int __init do_reset(void)
 	return 1;
 }
 
-static int __init maybe_link(void)
-{
-	if (nlink >= 2) {
-		char *old = find_link(major, minor, ino, mode, collected);
-		if (old)
-			return (ksys_link(old, collected) < 0) ? -1 : 1;
-	}
-	return 0;
-}
-
 static void __init clean_path(char *path, umode_t fmode)
 {
 	struct kstat st;
@@ -314,6 +304,18 @@ static void __init clean_path(char *path, umode_t fmode)
 	}
 }
 
+static int __init maybe_link(void)
+{
+	if (nlink >= 2) {
+		char *old = find_link(major, minor, ino, mode, collected);
+		if (old) {
+			clean_path(collected, 0);
+			return (ksys_link(old, collected) < 0) ? -1 : 1;
+		}
+	}
+	return 0;
+}
+
 static __initdata int wfd;
 
 static int __init do_name(void)
diff --git a/kernel/bpf/local_storage.c b/kernel/bpf/local_storage.c
index 830d7f0..fc1605a 100644
--- a/kernel/bpf/local_storage.c
+++ b/kernel/bpf/local_storage.c
@@ -138,7 +138,8 @@ static int cgroup_storage_update_elem(struct bpf_map *map, void *_key,
 		return -ENOENT;
 
 	new = kmalloc_node(sizeof(struct bpf_storage_buffer) +
-			   map->value_size, __GFP_ZERO | GFP_USER,
+			   map->value_size,
+			   __GFP_ZERO | GFP_ATOMIC | __GFP_NOWARN,
 			   map->numa_node);
 	if (!new)
 		return -ENOMEM;
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 5780876..56acfbb 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -5283,7 +5283,7 @@ static void adjust_subprog_starts(struct bpf_verifier_env *env, u32 off, u32 len
 		return;
 	/* NOTE: fake 'exit' subprog should be updated as well. */
 	for (i = 0; i <= env->subprog_cnt; i++) {
-		if (env->subprog_info[i].start < off)
+		if (env->subprog_info[i].start <= off)
 			continue;
 		env->subprog_info[i].start += len - 1;
 	}
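
The one-character change matters because a subprogram whose first
instruction sits exactly at the patched offset must keep its recorded
start; only subprograms strictly after the patch point shift by len - 1.
A standalone sketch of the adjustment, assuming a plain array of start
offsets:

	#include <stdio.h>

	/* shift recorded subprog starts after the instruction at 'off'
	 * is patched into 'len' instructions */
	static void adjust_starts(unsigned int *starts, int n,
				  unsigned int off, unsigned int len)
	{
		for (int i = 0; i < n; i++) {
			if (starts[i] <= off)	/* '<' would wrongly move a
						 * subprog starting at off */
				continue;
			starts[i] += len - 1;
		}
	}

	int main(void)
	{
		unsigned int starts[] = { 0, 5, 9 };

		adjust_starts(starts, 3, 5, 4);	/* patch insn 5 into 4 insns */
		/* subprog at 5 stays, subprog at 9 moves to 12 */
		printf("%u %u %u\n", starts[0], starts[1], starts[2]);
		return 0;
	}
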
diff --git a/kernel/kcov.c b/kernel/kcov.c
index 3ebd09e..97959d7 100644
--- a/kernel/kcov.c
+++ b/kernel/kcov.c
@@ -56,7 +56,7 @@ struct kcov {
 	struct task_struct	*t;
 };
 
-static bool check_kcov_mode(enum kcov_mode needed_mode, struct task_struct *t)
+static notrace bool check_kcov_mode(enum kcov_mode needed_mode, struct task_struct *t)
 {
 	unsigned int mode;
 
@@ -78,7 +78,7 @@ static bool check_kcov_mode(enum kcov_mode needed_mode, struct task_struct *t)
 	return mode == needed_mode;
 }
 
-static unsigned long canonicalize_ip(unsigned long ip)
+static notrace unsigned long canonicalize_ip(unsigned long ip)
 {
 #ifdef CONFIG_RANDOMIZE_BASE
 	ip -= kaslr_offset();
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 08fcfe4..9864a35 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -196,11 +196,13 @@ BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
 			i++;
 		} else if (fmt[i] == 'p' || fmt[i] == 's') {
 			mod[fmt_cnt]++;
-			i++;
-			if (!isspace(fmt[i]) && !ispunct(fmt[i]) && fmt[i] != 0)
+			/* disallow any further format extensions */
+			if (fmt[i + 1] != 0 &&
+			    !isspace(fmt[i + 1]) &&
+			    !ispunct(fmt[i + 1]))
 				return -EINVAL;
 			fmt_cnt++;
-			if (fmt[i - 1] == 's') {
+			if (fmt[i] == 's') {
 				if (str_seen)
 					/* allow only one '%s' per fmt string */
 					return -EINVAL;
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index f94be0c..8307faf 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -3397,7 +3397,7 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip,
 	entry->parent_ip		= parent_ip;
 
 	event_trigger_unlock_commit(&event_trace_file, buffer, event,
-				    entry, flags, pc);
+				    entry, flags, pc, 0);
  out:
 	atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
 	preempt_enable_notrace();
diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
index eb908ef..1b4ad08 100644
--- a/kernel/trace/trace_events_hist.c
+++ b/kernel/trace/trace_events_hist.c
@@ -668,7 +668,7 @@ static notrace void trace_event_raw_event_synth(void *__data,
 		}
 	}
 
-	trace_event_buffer_commit(&fbuffer);
+	trace_event_buffer_commit(&fbuffer, sizeof(*entry) + fields_size);
 out:
 	ring_buffer_nest_end(buffer);
 }
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index 70935ed..14afeeb 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -135,7 +135,6 @@ static void fill_pool(void)
 		if (!new)
 			return;
 
-		kmemleak_ignore(new);
 		raw_spin_lock_irqsave(&pool_lock, flags);
 		hlist_add_head(&new->node, &obj_pool);
 		debug_objects_allocated++;
@@ -1128,7 +1127,6 @@ static int __init debug_objects_replace_static_objects(void)
 		obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
 		if (!obj)
 			goto free;
-		kmemleak_ignore(obj);
 		hlist_add_head(&obj->node, &objects);
 	}
 
@@ -1184,7 +1182,8 @@ void __init debug_objects_mem_init(void)
 
 	obj_cache = kmem_cache_create("debug_objects_cache",
 				      sizeof (struct debug_obj), 0,
-				      SLAB_DEBUG_OBJECTS, NULL);
+				      SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE,
+				      NULL);
 
 	if (!obj_cache || debug_objects_replace_static_objects()) {
 		debug_objects_enabled = 0;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 801ed63..5c9a968 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5790,8 +5790,10 @@ void __meminit init_currently_empty_zone(struct zone *zone,
 					unsigned long size)
 {
 	struct pglist_data *pgdat = zone->zone_pgdat;
+	int zone_idx = zone_idx(zone) + 1;
 
-	pgdat->nr_zones = zone_idx(zone) + 1;
+	if (zone_idx > pgdat->nr_zones)
+		pgdat->nr_zones = zone_idx;
 
 	zone->zone_start_pfn = zone_start_pfn;
 
diff --git a/net/core/dev.c b/net/core/dev.c
index 1027b81..44ccab0 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2163,6 +2163,20 @@ static bool remove_xps_queue_cpu(struct net_device *dev,
 	return active;
 }
 
+static void reset_xps_maps(struct net_device *dev,
+			   struct xps_dev_maps *dev_maps,
+			   bool is_rxqs_map)
+{
+	if (is_rxqs_map) {
+		static_key_slow_dec_cpuslocked(&xps_rxqs_needed);
+		RCU_INIT_POINTER(dev->xps_rxqs_map, NULL);
+	} else {
+		RCU_INIT_POINTER(dev->xps_cpus_map, NULL);
+	}
+	static_key_slow_dec_cpuslocked(&xps_needed);
+	kfree_rcu(dev_maps, rcu);
+}
+
 static void clean_xps_maps(struct net_device *dev, const unsigned long *mask,
 			   struct xps_dev_maps *dev_maps, unsigned int nr_ids,
 			   u16 offset, u16 count, bool is_rxqs_map)
@@ -2174,18 +2188,15 @@ static void clean_xps_maps(struct net_device *dev, const unsigned long *mask,
 	     j < nr_ids;)
 		active |= remove_xps_queue_cpu(dev, dev_maps, j, offset,
 					       count);
-	if (!active) {
-		if (is_rxqs_map) {
-			RCU_INIT_POINTER(dev->xps_rxqs_map, NULL);
-		} else {
-			RCU_INIT_POINTER(dev->xps_cpus_map, NULL);
+	if (!active)
+		reset_xps_maps(dev, dev_maps, is_rxqs_map);
 
-			for (i = offset + (count - 1); count--; i--)
-				netdev_queue_numa_node_write(
-					netdev_get_tx_queue(dev, i),
-							NUMA_NO_NODE);
+	if (!is_rxqs_map) {
+		for (i = offset + (count - 1); count--; i--) {
+			netdev_queue_numa_node_write(
+				netdev_get_tx_queue(dev, i),
+				NUMA_NO_NODE);
 		}
-		kfree_rcu(dev_maps, rcu);
 	}
 }
 
@@ -2222,10 +2233,6 @@ static void netif_reset_xps_queues(struct net_device *dev, u16 offset,
 		       false);
 
 out_no_maps:
-	if (static_key_enabled(&xps_rxqs_needed))
-		static_key_slow_dec_cpuslocked(&xps_rxqs_needed);
-
-	static_key_slow_dec_cpuslocked(&xps_needed);
 	mutex_unlock(&xps_map_mutex);
 	cpus_read_unlock();
 }
@@ -2343,9 +2350,12 @@ int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
 	if (!new_dev_maps)
 		goto out_no_new_maps;
 
-	static_key_slow_inc_cpuslocked(&xps_needed);
-	if (is_rxqs_map)
-		static_key_slow_inc_cpuslocked(&xps_rxqs_needed);
+	if (!dev_maps) {
+		/* Increment static keys at most once per type */
+		static_key_slow_inc_cpuslocked(&xps_needed);
+		if (is_rxqs_map)
+			static_key_slow_inc_cpuslocked(&xps_rxqs_needed);
+	}
 
 	for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids),
 	     j < nr_ids;) {
@@ -2443,13 +2453,8 @@ int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
 	}
 
 	/* free map if not active */
-	if (!active) {
-		if (is_rxqs_map)
-			RCU_INIT_POINTER(dev->xps_rxqs_map, NULL);
-		else
-			RCU_INIT_POINTER(dev->xps_cpus_map, NULL);
-		kfree_rcu(dev_maps, rcu);
-	}
+	if (!active)
+		reset_xps_maps(dev, dev_maps, is_rxqs_map);
 
 out_no_maps:
 	mutex_unlock(&xps_map_mutex);
@@ -4987,7 +4992,7 @@ static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemallo
 		struct net_device *orig_dev = skb->dev;
 		struct packet_type *pt_prev = NULL;
 
-		list_del(&skb->list);
+		skb_list_del_init(skb);
 		__netif_receive_skb_core(skb, pfmemalloc, &pt_prev);
 		if (!pt_prev)
 			continue;
@@ -5143,7 +5148,7 @@ static void netif_receive_skb_list_internal(struct list_head *head)
 	INIT_LIST_HEAD(&sublist);
 	list_for_each_entry_safe(skb, next, head, list) {
 		net_timestamp_check(netdev_tstamp_prequeue, skb);
-		list_del(&skb->list);
+		skb_list_del_init(skb);
 		if (!skb_defer_rx_timestamp(skb))
 			list_add_tail(&skb->list, &sublist);
 	}
@@ -5154,7 +5159,7 @@ static void netif_receive_skb_list_internal(struct list_head *head)
 		rcu_read_lock();
 		list_for_each_entry_safe(skb, next, head, list) {
 			xdp_prog = rcu_dereference(skb->dev->xdp_prog);
-			list_del(&skb->list);
+			skb_list_del_init(skb);
 			if (do_xdp_generic(xdp_prog, skb) == XDP_PASS)
 				list_add_tail(&skb->list, &sublist);
 		}
@@ -5173,7 +5178,7 @@ static void netif_receive_skb_list_internal(struct list_head *head)
 
 			if (cpu >= 0) {
 				/* Will be handled, remove from list */
-				list_del(&skb->list);
+				skb_list_del_init(skb);
 				enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
 			}
 		}
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 6e5d61a..ebde98b 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -3730,6 +3730,9 @@ int ndo_dflt_fdb_dump(struct sk_buff *skb,
 {
 	int err;
 
+	if (dev->type != ARPHRD_ETHER)
+		return -EINVAL;
+
 	netif_addr_lock_bh(dev);
 	err = nlmsg_populate_fdb(skb, cb, dev, idx, &dev->uc);
 	if (err)
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index cb8fa5d..f686d77 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -513,6 +513,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
 	struct rb_node *rbn;
 	int len;
 	int ihlen;
+	int delta;
 	int err;
 	u8 ecn;
 
@@ -554,10 +555,16 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
 	if (len > 65535)
 		goto out_oversize;
 
+	delta = -head->truesize;
+
 	/* Head of list must not be cloned. */
 	if (skb_unclone(head, GFP_ATOMIC))
 		goto out_nomem;
 
+	delta += head->truesize;
+	if (delta)
+		add_frag_mem_limit(qp->q.net, delta);
+
 	/* If the first fragment is fragmented itself, we split
 	 * it to two chunks: the first with data and paged part
 	 * and the second, holding only fragments. */
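
skb_unclone() may reallocate the head skb's data and change
head->truesize, so the hunk snapshots truesize before the call and
charges only the difference to the fragment memory limit afterwards. A
toy version of the delta accounting, with add_frag_mem_limit() modeled
as a plain counter update:

	#include <stdio.h>

	static long frag_mem;	/* per-netns counter in the kernel */

	static void add_frag_mem_limit_sketch(long delta)
	{
		frag_mem += delta;
	}

	int main(void)
	{
		long truesize = 768;		/* before skb_unclone() */
		long delta = -truesize;

		frag_mem = truesize;		/* charged at enqueue time */
		truesize = 1024;		/* unclone grew the head */

		delta += truesize;
		if (delta)
			add_frag_mem_limit_sketch(delta);

		printf("accounted %ld for truesize %ld\n", frag_mem, truesize);
		return 0;
	}
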
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index 3196cf5..27c863f 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -551,7 +551,7 @@ static void ip_list_rcv_finish(struct net *net, struct sock *sk,
 	list_for_each_entry_safe(skb, next, head, list) {
 		struct dst_entry *dst;
 
-		list_del(&skb->list);
+		skb_list_del_init(skb);
 		/* if ingress device is enslaved to an L3 master device pass the
 		 * skb to its handler for processing
 		 */
@@ -598,7 +598,7 @@ void ip_list_rcv(struct list_head *head, struct packet_type *pt,
 		struct net_device *dev = skb->dev;
 		struct net *net = dev_net(dev);
 
-		list_del(&skb->list);
+		skb_list_del_init(skb);
 		skb = ip_rcv_core(skb, net);
 		if (skb == NULL)
 			continue;
diff --git a/net/ipv4/netfilter/ipt_MASQUERADE.c b/net/ipv4/netfilter/ipt_MASQUERADE.c
index ce1512b..fd3f9e8 100644
--- a/net/ipv4/netfilter/ipt_MASQUERADE.c
+++ b/net/ipv4/netfilter/ipt_MASQUERADE.c
@@ -81,9 +81,12 @@ static int __init masquerade_tg_init(void)
 	int ret;
 
 	ret = xt_register_target(&masquerade_tg_reg);
+	if (ret)
+		return ret;
 
-	if (ret == 0)
-		nf_nat_masquerade_ipv4_register_notifier();
+	ret = nf_nat_masquerade_ipv4_register_notifier();
+	if (ret)
+		xt_unregister_target(&masquerade_tg_reg);
 
 	return ret;
 }
diff --git a/net/ipv4/netfilter/nf_nat_masquerade_ipv4.c b/net/ipv4/netfilter/nf_nat_masquerade_ipv4.c
index ad3aeff..4c7fcd3 100644
--- a/net/ipv4/netfilter/nf_nat_masquerade_ipv4.c
+++ b/net/ipv4/netfilter/nf_nat_masquerade_ipv4.c
@@ -131,28 +131,50 @@ static struct notifier_block masq_inet_notifier = {
 	.notifier_call	= masq_inet_event,
 };
 
-static atomic_t masquerade_notifier_refcount = ATOMIC_INIT(0);
+static int masq_refcnt;
+static DEFINE_MUTEX(masq_mutex);
 
-void nf_nat_masquerade_ipv4_register_notifier(void)
+int nf_nat_masquerade_ipv4_register_notifier(void)
 {
+	int ret = 0;
+
+	mutex_lock(&masq_mutex);
 	/* check if the notifier was already set */
-	if (atomic_inc_return(&masquerade_notifier_refcount) > 1)
-		return;
+	if (++masq_refcnt > 1)
+		goto out_unlock;
 
 	/* Register for device down reports */
-	register_netdevice_notifier(&masq_dev_notifier);
+	ret = register_netdevice_notifier(&masq_dev_notifier);
+	if (ret)
+		goto err_dec;
 	/* Register IP address change reports */
-	register_inetaddr_notifier(&masq_inet_notifier);
+	ret = register_inetaddr_notifier(&masq_inet_notifier);
+	if (ret)
+		goto err_unregister;
+
+	mutex_unlock(&masq_mutex);
+	return ret;
+
+err_unregister:
+	unregister_netdevice_notifier(&masq_dev_notifier);
+err_dec:
+	masq_refcnt--;
+out_unlock:
+	mutex_unlock(&masq_mutex);
+	return ret;
 }
 EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv4_register_notifier);
 
 void nf_nat_masquerade_ipv4_unregister_notifier(void)
 {
+	mutex_lock(&masq_mutex);
 	/* check if the notifier still has clients */
-	if (atomic_dec_return(&masquerade_notifier_refcount) > 0)
-		return;
+	if (--masq_refcnt > 0)
+		goto out_unlock;
 
 	unregister_netdevice_notifier(&masq_dev_notifier);
 	unregister_inetaddr_notifier(&masq_inet_notifier);
+out_unlock:
+	mutex_unlock(&masq_mutex);
 }
 EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv4_unregister_notifier);
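
The mutex-plus-integer scheme replaces the atomic counter because
registration can now fail and must be rolled back, which a bare
atomic_inc_return() test cannot express. A compilable userspace sketch
of the register-once-with-rollback pattern (register_a()/register_b()
stand in for the netdevice and inetaddr notifiers, with the second made
to fail):

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t masq_mutex = PTHREAD_MUTEX_INITIALIZER;
	static int masq_refcnt;

	static int register_a(void) { return 0; }
	static int register_b(void) { return -1; }	/* simulated failure */
	static void unregister_a(void) { }

	static int masq_register(void)
	{
		int ret = 0;

		pthread_mutex_lock(&masq_mutex);
		if (++masq_refcnt > 1)	/* already registered by someone */
			goto out_unlock;

		ret = register_a();
		if (ret)
			goto err_dec;
		ret = register_b();
		if (ret)
			goto err_unregister;
		goto out_unlock;

	err_unregister:
		unregister_a();
	err_dec:
		masq_refcnt--;		/* roll the reference back */
	out_unlock:
		pthread_mutex_unlock(&masq_mutex);
		return ret;
	}

	int main(void)
	{
		printf("register -> %d, refcnt %d\n",
		       masq_register(), masq_refcnt);
		return 0;
	}
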
diff --git a/net/ipv4/netfilter/nft_masq_ipv4.c b/net/ipv4/netfilter/nft_masq_ipv4.c
index f1193e1..6847de1 100644
--- a/net/ipv4/netfilter/nft_masq_ipv4.c
+++ b/net/ipv4/netfilter/nft_masq_ipv4.c
@@ -69,7 +69,9 @@ static int __init nft_masq_ipv4_module_init(void)
 	if (ret < 0)
 		return ret;
 
-	nf_nat_masquerade_ipv4_register_notifier();
+	ret = nf_nat_masquerade_ipv4_register_notifier();
+	if (ret)
+		nft_unregister_expr(&nft_masq_ipv4_type);
 
 	return ret;
 }
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 05291b2..26f27d1 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1885,7 +1885,9 @@ static int tso_fragment(struct sock *sk, enum tcp_queue tcp_queue,
  * This algorithm is from John Heffner.
  */
 static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
-				 bool *is_cwnd_limited, u32 max_segs)
+				 bool *is_cwnd_limited,
+				 bool *is_rwnd_limited,
+				 u32 max_segs)
 {
 	const struct inet_connection_sock *icsk = inet_csk(sk);
 	u32 age, send_win, cong_win, limit, in_flight;
@@ -1893,9 +1895,6 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
 	struct sk_buff *head;
 	int win_divisor;
 
-	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
-		goto send_now;
-
 	if (icsk->icsk_ca_state >= TCP_CA_Recovery)
 		goto send_now;
 
@@ -1954,10 +1953,27 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
 	if (age < (tp->srtt_us >> 4))
 		goto send_now;
 
-	/* Ok, it looks like it is advisable to defer. */
+	/* Ok, it looks like it is advisable to defer.
+	 * Three cases are tracked:
+	 * 1) We are cwnd-limited
+	 * 2) We are rwnd-limited
+	 * 3) We are application-limited.
+	 */
+	if (cong_win < send_win) {
+		if (cong_win <= skb->len) {
+			*is_cwnd_limited = true;
+			return true;
+		}
+	} else {
+		if (send_win <= skb->len) {
+			*is_rwnd_limited = true;
+			return true;
+		}
+	}
 
-	if (cong_win < send_win && cong_win <= skb->len)
-		*is_cwnd_limited = true;
+	/* If this packet won't get more data, do not wait. */
+	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
+		goto send_now;
 
 	return true;
 
@@ -2321,7 +2337,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
 		} else {
 			if (!push_one &&
 			    tcp_tso_should_defer(sk, skb, &is_cwnd_limited,
-						 max_segs))
+						 &is_rwnd_limited, max_segs))
 				break;
 		}
 
@@ -2459,15 +2475,18 @@ void tcp_send_loss_probe(struct sock *sk)
 		goto rearm_timer;
 	}
 	skb = skb_rb_last(&sk->tcp_rtx_queue);
+	if (unlikely(!skb)) {
+		WARN_ONCE(tp->packets_out,
+			  "invalid inflight: %u state %u cwnd %u mss %d\n",
+			  tp->packets_out, sk->sk_state, tp->snd_cwnd, mss);
+		inet_csk(sk)->icsk_pending = 0;
+		return;
+	}
 
 	/* At most one outstanding TLP retransmission. */
 	if (tp->tlp_high_seq)
 		goto rearm_timer;
 
-	/* Retransmit last segment. */
-	if (WARN_ON(!skb))
-		goto rearm_timer;
-
 	if (skb_still_in_host_queue(sk, skb))
 		goto rearm_timer;
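
With the tcp_tso_should_defer() change above, a deferred send now records
whether the congestion window or the receive window is the binding
constraint, which feeds TCP's chrono accounting. A sketch of just that
classification, assuming window and skb sizes in bytes:

	#include <stdio.h>

	/* why is this TSO send being deferred? */
	static const char *defer_reason(unsigned int cong_win,
					unsigned int send_win,
					unsigned int skb_len)
	{
		if (cong_win < send_win) {
			if (cong_win <= skb_len)
				return "cwnd-limited";
		} else {
			if (send_win <= skb_len)
				return "rwnd-limited";
		}
		return "application-limited";
	}

	int main(void)
	{
		printf("%s\n", defer_reason(1000, 4000, 1448));
		printf("%s\n", defer_reason(4000, 1000, 1448));
		printf("%s\n", defer_reason(4000, 4000, 1448));
		return 0;
	}
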
 
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index 6242682..6b74523 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -95,7 +95,7 @@ static void ip6_list_rcv_finish(struct net *net, struct sock *sk,
 	list_for_each_entry_safe(skb, next, head, list) {
 		struct dst_entry *dst;
 
-		list_del(&skb->list);
+		skb_list_del_init(skb);
 		/* if ingress device is enslaved to an L3 master device pass the
 		 * skb to its handler for processing
 		 */
@@ -295,7 +295,7 @@ void ipv6_list_rcv(struct list_head *head, struct packet_type *pt,
 		struct net_device *dev = skb->dev;
 		struct net *net = dev_net(dev);
 
-		list_del(&skb->list);
+		skb_list_del_init(skb);
 		skb = ip6_rcv_core(skb, dev, net);
 		if (skb == NULL)
 			continue;
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index f9f8f55..2694def 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -195,37 +195,37 @@ int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
 	const struct ipv6_pinfo *np = inet6_sk(sk);
 	struct in6_addr *first_hop = &fl6->daddr;
 	struct dst_entry *dst = skb_dst(skb);
+	unsigned int head_room;
 	struct ipv6hdr *hdr;
 	u8  proto = fl6->flowi6_proto;
 	int seg_len = skb->len;
 	int hlimit = -1;
 	u32 mtu;
 
-	if (opt) {
-		unsigned int head_room;
+	head_room = sizeof(struct ipv6hdr) + LL_RESERVED_SPACE(dst->dev);
+	if (opt)
+		head_room += opt->opt_nflen + opt->opt_flen;
 
-		/* First: exthdrs may take lots of space (~8K for now)
-		   MAX_HEADER is not enough.
-		 */
-		head_room = opt->opt_nflen + opt->opt_flen;
-		seg_len += head_room;
-		head_room += sizeof(struct ipv6hdr) + LL_RESERVED_SPACE(dst->dev);
-
-		if (skb_headroom(skb) < head_room) {
-			struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room);
-			if (!skb2) {
-				IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
-					      IPSTATS_MIB_OUTDISCARDS);
-				kfree_skb(skb);
-				return -ENOBUFS;
-			}
-			if (skb->sk)
-				skb_set_owner_w(skb2, skb->sk);
-			consume_skb(skb);
-			skb = skb2;
+	if (unlikely(skb_headroom(skb) < head_room)) {
+		struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room);
+		if (!skb2) {
+			IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
+				      IPSTATS_MIB_OUTDISCARDS);
+			kfree_skb(skb);
+			return -ENOBUFS;
 		}
+		if (skb->sk)
+			skb_set_owner_w(skb2, skb->sk);
+		consume_skb(skb);
+		skb = skb2;
+	}
+
+	if (opt) {
+		seg_len += opt->opt_nflen + opt->opt_flen;
+
 		if (opt->opt_flen)
 			ipv6_push_frag_opts(skb, opt, &proto);
+
 		if (opt->opt_nflen)
 			ipv6_push_nfrag_opts(skb, opt, &proto, &first_hop,
 					     &fl6->saddr);
diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c
index 5ae8e1c..8b075f0 100644
--- a/net/ipv6/netfilter.c
+++ b/net/ipv6/netfilter.c
@@ -24,7 +24,8 @@ int ip6_route_me_harder(struct net *net, struct sk_buff *skb)
 	unsigned int hh_len;
 	struct dst_entry *dst;
 	struct flowi6 fl6 = {
-		.flowi6_oif = sk ? sk->sk_bound_dev_if : 0,
+		.flowi6_oif = sk && sk->sk_bound_dev_if ? sk->sk_bound_dev_if :
+			rt6_need_strict(&iph->daddr) ? skb_dst(skb)->dev->ifindex : 0,
 		.flowi6_mark = skb->mark,
 		.flowi6_uid = sock_net_uid(net, sk),
 		.daddr = iph->daddr,
diff --git a/net/ipv6/netfilter/ip6t_MASQUERADE.c b/net/ipv6/netfilter/ip6t_MASQUERADE.c
index 491f808..29c7f19 100644
--- a/net/ipv6/netfilter/ip6t_MASQUERADE.c
+++ b/net/ipv6/netfilter/ip6t_MASQUERADE.c
@@ -58,8 +58,12 @@ static int __init masquerade_tg6_init(void)
 	int err;
 
 	err = xt_register_target(&masquerade_tg6_reg);
-	if (err == 0)
-		nf_nat_masquerade_ipv6_register_notifier();
+	if (err)
+		return err;
+
+	err = nf_nat_masquerade_ipv6_register_notifier();
+	if (err)
+		xt_unregister_target(&masquerade_tg6_reg);
 
 	return err;
 }
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index f76bd4d..043ed8e 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -341,7 +341,7 @@ static bool
 nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *prev,  struct net_device *dev)
 {
 	struct sk_buff *fp, *head = fq->q.fragments;
-	int    payload_len;
+	int    payload_len, delta;
 	u8 ecn;
 
 	inet_frag_kill(&fq->q);
@@ -363,10 +363,16 @@ nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *prev,  struct net_devic
 		return false;
 	}
 
+	delta = -head->truesize;
+
 	/* Head of list must not be cloned. */
 	if (skb_unclone(head, GFP_ATOMIC))
 		return false;
 
+	delta += head->truesize;
+	if (delta)
+		add_frag_mem_limit(fq->q.net, delta);
+
 	/* If the first fragment is fragmented itself, we split
 	 * it to two chunks: the first with data and paged part
 	 * and the second, holding only fragments. */
diff --git a/net/ipv6/netfilter/nf_nat_masquerade_ipv6.c b/net/ipv6/netfilter/nf_nat_masquerade_ipv6.c
index e6eb7cf..37b1d413 100644
--- a/net/ipv6/netfilter/nf_nat_masquerade_ipv6.c
+++ b/net/ipv6/netfilter/nf_nat_masquerade_ipv6.c
@@ -120,8 +120,8 @@ static void iterate_cleanup_work(struct work_struct *work)
  * of ipv6 addresses being deleted), we also need to add an upper
  * limit to the number of queued work items.
  */
-static int masq_inet_event(struct notifier_block *this,
-			   unsigned long event, void *ptr)
+static int masq_inet6_event(struct notifier_block *this,
+			    unsigned long event, void *ptr)
 {
 	struct inet6_ifaddr *ifa = ptr;
 	const struct net_device *dev;
@@ -158,30 +158,53 @@ static int masq_inet_event(struct notifier_block *this,
 	return NOTIFY_DONE;
 }
 
-static struct notifier_block masq_inet_notifier = {
-	.notifier_call	= masq_inet_event,
+static struct notifier_block masq_inet6_notifier = {
+	.notifier_call	= masq_inet6_event,
 };
 
-static atomic_t masquerade_notifier_refcount = ATOMIC_INIT(0);
+static int masq_refcnt;
+static DEFINE_MUTEX(masq_mutex);
 
-void nf_nat_masquerade_ipv6_register_notifier(void)
+int nf_nat_masquerade_ipv6_register_notifier(void)
 {
-	/* check if the notifier is already set */
-	if (atomic_inc_return(&masquerade_notifier_refcount) > 1)
-		return;
+	int ret = 0;
 
-	register_netdevice_notifier(&masq_dev_notifier);
-	register_inet6addr_notifier(&masq_inet_notifier);
+	mutex_lock(&masq_mutex);
+	/* check if the notifier is already set */
+	if (++masq_refcnt > 1)
+		goto out_unlock;
+
+	ret = register_netdevice_notifier(&masq_dev_notifier);
+	if (ret)
+		goto err_dec;
+
+	ret = register_inet6addr_notifier(&masq_inet6_notifier);
+	if (ret)
+		goto err_unregister;
+
+	mutex_unlock(&masq_mutex);
+	return ret;
+
+err_unregister:
+	unregister_netdevice_notifier(&masq_dev_notifier);
+err_dec:
+	masq_refcnt--;
+out_unlock:
+	mutex_unlock(&masq_mutex);
+	return ret;
 }
 EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv6_register_notifier);
 
 void nf_nat_masquerade_ipv6_unregister_notifier(void)
 {
+	mutex_lock(&masq_mutex);
 	/* check if the notifier still has clients */
-	if (atomic_dec_return(&masquerade_notifier_refcount) > 0)
-		return;
+	if (--masq_refcnt > 0)
+		goto out_unlock;
 
-	unregister_inet6addr_notifier(&masq_inet_notifier);
+	unregister_inet6addr_notifier(&masq_inet6_notifier);
 	unregister_netdevice_notifier(&masq_dev_notifier);
+out_unlock:
+	mutex_unlock(&masq_mutex);
 }
 EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv6_unregister_notifier);
diff --git a/net/ipv6/netfilter/nft_masq_ipv6.c b/net/ipv6/netfilter/nft_masq_ipv6.c
index dd0122f..e06c82e 100644
--- a/net/ipv6/netfilter/nft_masq_ipv6.c
+++ b/net/ipv6/netfilter/nft_masq_ipv6.c
@@ -70,7 +70,9 @@ static int __init nft_masq_ipv6_module_init(void)
 	if (ret < 0)
 		return ret;
 
-	nf_nat_masquerade_ipv6_register_notifier();
+	ret = nf_nat_masquerade_ipv6_register_notifier();
+	if (ret)
+		nft_unregister_expr(&nft_masq_ipv6_type);
 
 	return ret;
 }
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 5c5b4f7..d3fd2d7 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -281,7 +281,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
 {
 	struct net *net = container_of(fq->q.net, struct net, ipv6.frags);
 	struct sk_buff *fp, *head = fq->q.fragments;
-	int    payload_len;
+	int    payload_len, delta;
 	unsigned int nhoff;
 	int sum_truesize;
 	u8 ecn;
@@ -322,10 +322,16 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
 	if (payload_len > IPV6_MAXPLEN)
 		goto out_oversize;
 
+	delta = -head->truesize;
+
 	/* Head of list must not be cloned. */
 	if (skb_unclone(head, GFP_ATOMIC))
 		goto out_oom;
 
+	delta += head->truesize;
+	if (delta)
+		add_frag_mem_limit(fq->q.net, delta);
+
 	/* If the first fragment is fragmented itself, we split
 	 * it to two chunks: the first with data and paged part
 	 * and the second, holding only fragments. */
diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c
index a8854dd..8181ee7 100644
--- a/net/ipv6/seg6_iptunnel.c
+++ b/net/ipv6/seg6_iptunnel.c
@@ -347,6 +347,7 @@ static int seg6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
 		struct ipv6hdr *hdr = ipv6_hdr(skb);
 		struct flowi6 fl6;
 
+		memset(&fl6, 0, sizeof(fl6));
 		fl6.daddr = hdr->daddr;
 		fl6.saddr = hdr->saddr;
 		fl6.flowlabel = ip6_flowinfo(hdr);
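
fl6 lives on the stack and only a few of its members are assigned, so
without the memset() the route lookup would consume uninitialized fields
such as flowi6_oif and flowi6_mark. The same zero-before-partial-init
pattern in plain C (the struct is a stand-in for struct flowi6):

	#include <stdio.h>
	#include <string.h>

	struct flow { int oif; unsigned int mark; unsigned int label; };

	int main(void)
	{
		struct flow fl6;

		/* zero everything first: members never assigned below
		 * must not carry stack garbage into the lookup */
		memset(&fl6, 0, sizeof(fl6));
		fl6.label = 0x12345;

		printf("oif=%d mark=%u label=%#x\n",
		       fl6.oif, fl6.mark, fl6.label);
		return 0;
	}
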
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 62eefea..518364f 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -3980,6 +3980,9 @@ static void __net_exit ip_vs_control_net_cleanup_sysctl(struct netns_ipvs *ipvs)
 
 static struct notifier_block ip_vs_dst_notifier = {
 	.notifier_call = ip_vs_dst_event,
+#ifdef CONFIG_IP_VS_IPV6
+	.priority = ADDRCONF_NOTIFY_PRIORITY + 5,
+#endif
 };
 
 int __net_init ip_vs_control_net_init(struct netns_ipvs *ipvs)
diff --git a/net/netfilter/nf_conncount.c b/net/netfilter/nf_conncount.c
index 02ca7df..b6d0f6d 100644
--- a/net/netfilter/nf_conncount.c
+++ b/net/netfilter/nf_conncount.c
@@ -49,6 +49,7 @@ struct nf_conncount_tuple {
 	struct nf_conntrack_zone	zone;
 	int				cpu;
 	u32				jiffies32;
+	bool				dead;
 	struct rcu_head			rcu_head;
 };
 
@@ -106,15 +107,16 @@ nf_conncount_add(struct nf_conncount_list *list,
 	conn->zone = *zone;
 	conn->cpu = raw_smp_processor_id();
 	conn->jiffies32 = (u32)jiffies;
-	spin_lock(&list->list_lock);
+	conn->dead = false;
+	spin_lock_bh(&list->list_lock);
 	if (list->dead == true) {
 		kmem_cache_free(conncount_conn_cachep, conn);
-		spin_unlock(&list->list_lock);
+		spin_unlock_bh(&list->list_lock);
 		return NF_CONNCOUNT_SKIP;
 	}
 	list_add_tail(&conn->node, &list->head);
 	list->count++;
-	spin_unlock(&list->list_lock);
+	spin_unlock_bh(&list->list_lock);
 	return NF_CONNCOUNT_ADDED;
 }
 EXPORT_SYMBOL_GPL(nf_conncount_add);
@@ -132,19 +134,22 @@ static bool conn_free(struct nf_conncount_list *list,
 {
 	bool free_entry = false;
 
-	spin_lock(&list->list_lock);
+	spin_lock_bh(&list->list_lock);
 
-	if (list->count == 0) {
-		spin_unlock(&list->list_lock);
-                return free_entry;
+	if (conn->dead) {
+		spin_unlock_bh(&list->list_lock);
+		return free_entry;
 	}
 
 	list->count--;
+	conn->dead = true;
 	list_del_rcu(&conn->node);
-	if (list->count == 0)
+	if (list->count == 0) {
+		list->dead = true;
 		free_entry = true;
+	}
 
-	spin_unlock(&list->list_lock);
+	spin_unlock_bh(&list->list_lock);
 	call_rcu(&conn->rcu_head, __conn_free);
 	return free_entry;
 }
@@ -245,7 +250,7 @@ void nf_conncount_list_init(struct nf_conncount_list *list)
 {
 	spin_lock_init(&list->list_lock);
 	INIT_LIST_HEAD(&list->head);
-	list->count = 1;
+	list->count = 0;
 	list->dead = false;
 }
 EXPORT_SYMBOL_GPL(nf_conncount_list_init);
@@ -259,6 +264,7 @@ bool nf_conncount_gc_list(struct net *net,
 	struct nf_conn *found_ct;
 	unsigned int collected = 0;
 	bool free_entry = false;
+	bool ret = false;
 
 	list_for_each_entry_safe(conn, conn_n, &list->head, node) {
 		found = find_or_evict(net, list, conn, &free_entry);
@@ -288,7 +294,15 @@ bool nf_conncount_gc_list(struct net *net,
 		if (collected > CONNCOUNT_GC_MAX_NODES)
 			return false;
 	}
-	return false;
+
+	spin_lock_bh(&list->list_lock);
+	if (!list->count) {
+		list->dead = true;
+		ret = true;
+	}
+	spin_unlock_bh(&list->list_lock);
+
+	return ret;
 }
 EXPORT_SYMBOL_GPL(nf_conncount_gc_list);
 
@@ -309,11 +323,8 @@ static void tree_nodes_free(struct rb_root *root,
 	while (gc_count) {
 		rbconn = gc_nodes[--gc_count];
 		spin_lock(&rbconn->list.list_lock);
-		if (rbconn->list.count == 0 && rbconn->list.dead == false) {
-			rbconn->list.dead = true;
-			rb_erase(&rbconn->node, root);
-			call_rcu(&rbconn->rcu_head, __tree_nodes_free);
-		}
+		rb_erase(&rbconn->node, root);
+		call_rcu(&rbconn->rcu_head, __tree_nodes_free);
 		spin_unlock(&rbconn->list.list_lock);
 	}
 }
@@ -414,6 +425,7 @@ insert_tree(struct net *net,
 	nf_conncount_list_init(&rbconn->list);
 	list_add(&conn->node, &rbconn->list.head);
 	count = 1;
+	rbconn->list.count = count;
 
 	rb_link_node(&rbconn->node, parent, rbnode);
 	rb_insert_color(&rbconn->node, root);
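
Marking the entry dead under the list lock makes a second conn_free() on
the same node a harmless no-op rather than a double list_del(). A
single-threaded sketch of the idempotent-free idea (the lock is elided;
in the kernel, list_lock protects the flag and the count):

	#include <stdbool.h>
	#include <stdio.h>

	struct conn { bool dead; };
	static int list_count = 2;

	/* return true only if this call actually removed the entry */
	static bool conn_free_once(struct conn *c)
	{
		if (c->dead)
			return false;	/* a racing caller got here first */
		c->dead = true;
		list_count--;
		return true;
	}

	int main(void)
	{
		struct conn c = { .dead = false };

		printf("first: %d, second: %d, count: %d\n",
		       conn_free_once(&c), conn_free_once(&c), list_count);
		return 0;
	}
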
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 2cfb173..fe0558b 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -2432,7 +2432,7 @@ static int nf_tables_getrule(struct net *net, struct sock *nlsk,
 static void nf_tables_rule_destroy(const struct nft_ctx *ctx,
 				   struct nft_rule *rule)
 {
-	struct nft_expr *expr;
+	struct nft_expr *expr, *next;
 
 	lockdep_assert_held(&ctx->net->nft.commit_mutex);
 	/*
@@ -2441,8 +2441,9 @@ static void nf_tables_rule_destroy(const struct nft_ctx *ctx,
 	 */
 	expr = nft_expr_first(rule);
 	while (expr != nft_expr_last(rule) && expr->ops) {
+		next = nft_expr_next(expr);
 		nf_tables_expr_destroy(ctx, expr);
-		expr = nft_expr_next(expr);
+		expr = next;
 	}
 	kfree(rule);
 }
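
nf_tables_expr_destroy() can free the storage that nft_expr_next() would
read, so the loop has to fetch the successor before destroying the
current expression. The classic shape of that fix, as a compilable
singly-linked-list example:

	#include <stdio.h>
	#include <stdlib.h>

	struct expr { struct expr *next; };

	static void destroy_all(struct expr *e)
	{
		while (e) {
			/* read the successor before freeing: after free(e)
			 * the load e->next would be a use-after-free */
			struct expr *next = e->next;

			free(e);
			e = next;
		}
	}

	int main(void)
	{
		struct expr *a = calloc(1, sizeof(*a));
		struct expr *b = calloc(1, sizeof(*b));

		if (!a || !b)
			return 1;
		a->next = b;
		destroy_all(a);
		puts("done");
		return 0;
	}
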
@@ -2645,21 +2646,14 @@ static int nf_tables_newrule(struct net *net, struct sock *nlsk,
 	}
 
 	if (nlh->nlmsg_flags & NLM_F_REPLACE) {
-		if (!nft_is_active_next(net, old_rule)) {
-			err = -ENOENT;
-			goto err2;
-		}
-		trans = nft_trans_rule_add(&ctx, NFT_MSG_DELRULE,
-					   old_rule);
+		trans = nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, rule);
 		if (trans == NULL) {
 			err = -ENOMEM;
 			goto err2;
 		}
-		nft_deactivate_next(net, old_rule);
-		chain->use--;
-
-		if (nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, rule) == NULL) {
-			err = -ENOMEM;
+		err = nft_delrule(&ctx, old_rule);
+		if (err < 0) {
+			nft_trans_destroy(trans);
 			goto err2;
 		}
 
@@ -6277,7 +6271,7 @@ static void nf_tables_commit_chain_free_rules_old(struct nft_rule **rules)
 	call_rcu(&old->h, __nf_tables_commit_chain_free_rules_old);
 }
 
-static void nf_tables_commit_chain_active(struct net *net, struct nft_chain *chain)
+static void nf_tables_commit_chain(struct net *net, struct nft_chain *chain)
 {
 	struct nft_rule **g0, **g1;
 	bool next_genbit;
@@ -6363,11 +6357,8 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
 
 	/* step 2.  Make rules_gen_X visible to packet path */
 	list_for_each_entry(table, &net->nft.tables, list) {
-		list_for_each_entry(chain, &table->chains, list) {
-			if (!nft_is_active_next(net, chain))
-				continue;
-			nf_tables_commit_chain_active(net, chain);
-		}
+		list_for_each_entry(chain, &table->chains, list)
+			nf_tables_commit_chain(net, chain);
 	}
 
 	/*
diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
index ad2fe6a..29d6fc7 100644
--- a/net/netfilter/nft_compat.c
+++ b/net/netfilter/nft_compat.c
@@ -501,6 +501,7 @@ __nft_match_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr,
 		    void *info)
 {
 	struct xt_match *match = expr->ops->data;
+	struct module *me = match->me;
 	struct xt_mtdtor_param par;
 
 	par.net = ctx->net;
@@ -511,7 +512,7 @@ __nft_match_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr,
 		par.match->destroy(&par);
 
 	if (nft_xt_put(container_of(expr->ops, struct nft_xt, ops)))
-		module_put(match->me);
+		module_put(me);
 }
 
 static void
diff --git a/net/netfilter/nft_flow_offload.c b/net/netfilter/nft_flow_offload.c
index d6bab8c..5fd4c57 100644
--- a/net/netfilter/nft_flow_offload.c
+++ b/net/netfilter/nft_flow_offload.c
@@ -214,7 +214,9 @@ static int __init nft_flow_offload_module_init(void)
 {
 	int err;
 
-	register_netdevice_notifier(&flow_offload_netdev_notifier);
+	err = register_netdevice_notifier(&flow_offload_netdev_notifier);
+	if (err)
+		goto err;
 
 	err = nft_register_expr(&nft_flow_offload_type);
 	if (err < 0)
@@ -224,6 +226,7 @@ static int __init nft_flow_offload_module_init(void)
 
 register_expr:
 	unregister_netdevice_notifier(&flow_offload_netdev_notifier);
+err:
 	return err;
 }
 
diff --git a/net/netfilter/xt_RATEEST.c b/net/netfilter/xt_RATEEST.c
index dec843c..9e05c86b 100644
--- a/net/netfilter/xt_RATEEST.c
+++ b/net/netfilter/xt_RATEEST.c
@@ -201,18 +201,8 @@ static __net_init int xt_rateest_net_init(struct net *net)
 	return 0;
 }
 
-static void __net_exit xt_rateest_net_exit(struct net *net)
-{
-	struct xt_rateest_net *xn = net_generic(net, xt_rateest_id);
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(xn->hash); i++)
-		WARN_ON_ONCE(!hlist_empty(&xn->hash[i]));
-}
-
 static struct pernet_operations xt_rateest_net_ops = {
 	.init = xt_rateest_net_init,
-	.exit = xt_rateest_net_exit,
 	.id   = &xt_rateest_id,
 	.size = sizeof(struct xt_rateest_net),
 };
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
index 3e7d259..1ad4017 100644
--- a/net/netfilter/xt_hashlimit.c
+++ b/net/netfilter/xt_hashlimit.c
@@ -295,9 +295,10 @@ static int htable_create(struct net *net, struct hashlimit_cfg3 *cfg,
 
 	/* copy match config into hashtable config */
 	ret = cfg_copy(&hinfo->cfg, (void *)cfg, 3);
-
-	if (ret)
+	if (ret) {
+		vfree(hinfo);
 		return ret;
+	}
 
 	hinfo->cfg.size = size;
 	if (hinfo->cfg.max == 0)
@@ -814,7 +815,6 @@ hashlimit_mt_v1(const struct sk_buff *skb, struct xt_action_param *par)
 	int ret;
 
 	ret = cfg_copy(&cfg, (void *)&info->cfg, 1);
-
 	if (ret)
 		return ret;
 
@@ -830,7 +830,6 @@ hashlimit_mt_v2(const struct sk_buff *skb, struct xt_action_param *par)
 	int ret;
 
 	ret = cfg_copy(&cfg, (void *)&info->cfg, 2);
-
 	if (ret)
 		return ret;
 
@@ -921,7 +920,6 @@ static int hashlimit_mt_check_v1(const struct xt_mtchk_param *par)
 		return ret;
 
 	ret = cfg_copy(&cfg, (void *)&info->cfg, 1);
-
 	if (ret)
 		return ret;
 
@@ -940,7 +938,6 @@ static int hashlimit_mt_check_v2(const struct xt_mtchk_param *par)
 		return ret;
 
 	ret = cfg_copy(&cfg, (void *)&info->cfg, 2);
-
 	if (ret)
 		return ret;
 
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index ad18a20..74c0f65 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -441,6 +441,9 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 	int count = 1;
 	int rc = NET_XMIT_SUCCESS;
 
+	/* Do not fool qdisc_drop_all() */
+	skb->prev = NULL;
+
 	/* Random duplication */
 	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
 		++count;
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 6a28b96..914750b 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -118,9 +118,6 @@ static struct sctp_association *sctp_association_init(
 	asoc->flowlabel = sp->flowlabel;
 	asoc->dscp = sp->dscp;
 
-	/* Initialize default path MTU. */
-	asoc->pathmtu = sp->pathmtu;
-
 	/* Set association default SACK delay */
 	asoc->sackdelay = msecs_to_jiffies(sp->sackdelay);
 	asoc->sackfreq = sp->sackfreq;
@@ -252,6 +249,10 @@ static struct sctp_association *sctp_association_init(
 			     0, gfp))
 		goto fail_init;
 
+	/* Initialize default path MTU. */
+	asoc->pathmtu = sp->pathmtu;
+	sctp_assoc_update_frag_point(asoc);
+
 	/* Assume that peer would support both address types unless we are
 	 * told otherwise.
 	 */
@@ -434,7 +435,7 @@ static void sctp_association_destroy(struct sctp_association *asoc)
 
 	WARN_ON(atomic_read(&asoc->rmem_alloc));
 
-	kfree(asoc);
+	kfree_rcu(asoc, rcu);
 	SCTP_DBG_OBJCNT_DEC(assoc);
 }
 
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 4a4fd19..f4ac6c5 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -2462,6 +2462,9 @@ int sctp_process_init(struct sctp_association *asoc, struct sctp_chunk *chunk,
 			     asoc->c.sinit_max_instreams, gfp))
 		goto clean_up;
 
+	/* Update frag_point when stream_interleave may get changed. */
+	sctp_assoc_update_frag_point(asoc);
+
 	if (!asoc->temp && sctp_assoc_set_id(asoc, gfp))
 		goto clean_up;
 
diff --git a/sound/firewire/fireface/ff-protocol-ff400.c b/sound/firewire/fireface/ff-protocol-ff400.c
index 64c3cb0..654a503 100644
--- a/sound/firewire/fireface/ff-protocol-ff400.c
+++ b/sound/firewire/fireface/ff-protocol-ff400.c
@@ -30,7 +30,7 @@ static int ff400_get_clock(struct snd_ff *ff, unsigned int *rate,
 	int err;
 
 	err = snd_fw_transaction(ff->unit, TCODE_READ_QUADLET_REQUEST,
-				 FF400_SYNC_STATUS, &reg, sizeof(reg), 0);
+				 FF400_CLOCK_CONFIG, &reg, sizeof(reg), 0);
 	if (err < 0)
 		return err;
 	data = le32_to_cpu(reg);
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 22ca1f0..8a3d069 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -5520,6 +5520,9 @@ enum {
 	ALC285_FIXUP_LENOVO_HEADPHONE_NOISE,
 	ALC295_FIXUP_HP_AUTO_MUTE,
 	ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE,
+	ALC294_FIXUP_ASUS_MIC,
+	ALC294_FIXUP_ASUS_HEADSET_MIC,
+	ALC294_FIXUP_ASUS_SPK,
 };
 
 static const struct hda_fixup alc269_fixups[] = {
@@ -6392,6 +6395,8 @@ static const struct hda_fixup alc269_fixups[] = {
 	[ALC285_FIXUP_LENOVO_HEADPHONE_NOISE] = {
 		.type = HDA_FIXUP_FUNC,
 		.v.func = alc285_fixup_invalidate_dacs,
+		.chained = true,
+		.chain_id = ALC269_FIXUP_THINKPAD_ACPI
 	},
 	[ALC295_FIXUP_HP_AUTO_MUTE] = {
 		.type = HDA_FIXUP_FUNC,
@@ -6406,6 +6411,36 @@ static const struct hda_fixup alc269_fixups[] = {
 		.chained = true,
 		.chain_id = ALC269_FIXUP_HEADSET_MIC
 	},
+	[ALC294_FIXUP_ASUS_MIC] = {
+		.type = HDA_FIXUP_PINS,
+		.v.pins = (const struct hda_pintbl[]) {
+			{ 0x13, 0x90a60160 }, /* use as internal mic */
+			{ 0x19, 0x04a11120 }, /* use as headset mic, without its own jack detect */
+			{ }
+		},
+		.chained = true,
+		.chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
+	},
+	[ALC294_FIXUP_ASUS_HEADSET_MIC] = {
+		.type = HDA_FIXUP_PINS,
+		.v.pins = (const struct hda_pintbl[]) {
+			{ 0x19, 0x01a1113c }, /* use as headset mic, without its own jack detect */
+			{ }
+		},
+		.chained = true,
+		.chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
+	},
+	[ALC294_FIXUP_ASUS_SPK] = {
+		.type = HDA_FIXUP_VERBS,
+		.v.verbs = (const struct hda_verb[]) {
+			/* Set EAPD high */
+			{ 0x20, AC_VERB_SET_COEF_INDEX, 0x40 },
+			{ 0x20, AC_VERB_SET_PROC_COEF, 0x8800 },
+			{ }
+		},
+		.chained = true,
+		.chain_id = ALC294_FIXUP_ASUS_HEADSET_MIC
+	},
 };
 
 static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -6548,6 +6583,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x1043, 0x12e0, "ASUS X541SA", ALC256_FIXUP_ASUS_MIC),
 	SND_PCI_QUIRK(0x1043, 0x13b0, "ASUS Z550SA", ALC256_FIXUP_ASUS_MIC),
 	SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_ASUS_ZENBOOK),
+	SND_PCI_QUIRK(0x1043, 0x14a1, "ASUS UX533FD", ALC294_FIXUP_ASUS_SPK),
 	SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A),
 	SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
 	SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
@@ -7155,6 +7191,14 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
 	SND_HDA_PIN_QUIRK(0x10ec0293, 0x1028, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE,
 		ALC292_STANDARD_PINS,
 		{0x13, 0x90a60140}),
+	SND_HDA_PIN_QUIRK(0x10ec0294, 0x1043, "ASUS", ALC294_FIXUP_ASUS_MIC,
+		{0x14, 0x90170110},
+		{0x1b, 0x90a70130},
+		{0x21, 0x04211020}),
+	SND_HDA_PIN_QUIRK(0x10ec0294, 0x1043, "ASUS", ALC294_FIXUP_ASUS_SPK,
+		{0x12, 0x90a60130},
+		{0x17, 0x90170110},
+		{0x21, 0x04211020}),
 	SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
 		ALC295_STANDARD_PINS,
 		{0x17, 0x21014020},
@@ -7227,6 +7271,37 @@ static void alc269_fill_coef(struct hda_codec *codec)
 	alc_update_coef_idx(codec, 0x4, 0, 1<<11);
 }
 
+static void alc294_hp_init(struct hda_codec *codec)
+{
+	struct alc_spec *spec = codec->spec;
+	hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
+	int i, val;
+
+	if (!hp_pin)
+		return;
+
+	snd_hda_codec_write(codec, hp_pin, 0,
+			    AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
+
+	msleep(100);
+
+	snd_hda_codec_write(codec, hp_pin, 0,
+			    AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
+
+	alc_update_coef_idx(codec, 0x6f, 0x000f, 0); /* Set HP depop to manual mode */
+	alc_update_coefex_idx(codec, 0x58, 0x00, 0x8000, 0x8000); /* HP depop procedure start */
+
+	/* Wait for the depop procedure to finish */
+	val = alc_read_coefex_idx(codec, 0x58, 0x01);
+	for (i = 0; i < 20 && val & 0x0080; i++) {
+		msleep(50);
+		val = alc_read_coefex_idx(codec, 0x58, 0x01);
+	}
+	/* Set HP depop to auto mode */
+	alc_update_coef_idx(codec, 0x6f, 0x000f, 0x000b);
+	msleep(50);
+}
+
 /*
  */
 static int patch_alc269(struct hda_codec *codec)
@@ -7352,6 +7427,7 @@ static int patch_alc269(struct hda_codec *codec)
 		spec->codec_variant = ALC269_TYPE_ALC294;
 		spec->gen.mixer_nid = 0; /* ALC2x4 does not have any loopback mixer path */
 		alc_update_coef_idx(codec, 0x6b, 0x0018, (1<<4) | (1<<3)); /* UAJ MIC Vref control by verb */
+		alc294_hp_init(codec);
 		break;
 	case 0x10ec0300:
 		spec->codec_variant = ALC269_TYPE_ALC300;
@@ -7363,6 +7439,7 @@ static int patch_alc269(struct hda_codec *codec)
 		spec->codec_variant = ALC269_TYPE_ALC700;
 		spec->gen.mixer_nid = 0; /* ALC700 does not have any loopback mixer path */
 		alc_update_coef_idx(codec, 0x4a, 1 << 15, 0); /* Combo jack auto trigger control */
+		alc294_hp_init(codec);
 		break;
 
 	}
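
alc294_hp_init() polls a codec status bit with a hard upper bound so a
stuck bit cannot wedge the probe path. The shape of that bounded poll
loop as a standalone program (read_status() is a stand-in for
alc_read_coefex_idx(), rigged to clear the bit after a few reads):

	#include <stdio.h>
	#include <unistd.h>

	static int reads;

	/* pretend hardware: bit 0x0080 clears on the third read */
	static unsigned int read_status(void)
	{
		return ++reads < 3 ? 0x0080 : 0x0000;
	}

	int main(void)
	{
		unsigned int val = read_status();
		int i;

		/* poll at most 20 times, 50 ms apart, like the depop wait */
		for (i = 0; i < 20 && (val & 0x0080); i++) {
			usleep(50 * 1000);
			val = read_status();
		}
		printf("done after %d polls, val=%#x\n", i, val);
		return 0;
	}
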
diff --git a/sound/soc/codecs/hdac_hdmi.c b/sound/soc/codecs/hdac_hdmi.c
index 7b8533a..b61d518 100644
--- a/sound/soc/codecs/hdac_hdmi.c
+++ b/sound/soc/codecs/hdac_hdmi.c
@@ -2184,11 +2184,6 @@ static int hdac_hdmi_runtime_suspend(struct device *dev)
 	 */
 	snd_hdac_codec_read(hdev, hdev->afg, 0,	AC_VERB_SET_POWER_STATE,
 							AC_PWRST_D3);
-	err = snd_hdac_display_power(bus, false);
-	if (err < 0) {
-		dev_err(dev, "Cannot turn on display power on i915\n");
-		return err;
-	}
 
 	hlink = snd_hdac_ext_bus_get_link(bus, dev_name(dev));
 	if (!hlink) {
@@ -2198,7 +2193,11 @@ static int hdac_hdmi_runtime_suspend(struct device *dev)
 
 	snd_hdac_ext_bus_link_put(bus, hlink);
 
-	return 0;
+	err = snd_hdac_display_power(bus, false);
+	if (err < 0)
+		dev_err(dev, "Cannot turn off display power on i915\n");
+
+	return err;
 }
 
 static int hdac_hdmi_runtime_resume(struct device *dev)
diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
index f616560..4d3ec29 100644
--- a/sound/soc/codecs/wm_adsp.c
+++ b/sound/soc/codecs/wm_adsp.c
@@ -765,38 +765,41 @@ static unsigned int wm_adsp_region_to_reg(struct wm_adsp_region const *mem,
 
 static void wm_adsp2_show_fw_status(struct wm_adsp *dsp)
 {
-	u16 scratch[4];
+	unsigned int scratch[4];
+	unsigned int addr = dsp->base + ADSP2_SCRATCH0;
+	unsigned int i;
 	int ret;
 
-	ret = regmap_raw_read(dsp->regmap, dsp->base + ADSP2_SCRATCH0,
-				scratch, sizeof(scratch));
-	if (ret) {
-		adsp_err(dsp, "Failed to read SCRATCH regs: %d\n", ret);
-		return;
+	for (i = 0; i < ARRAY_SIZE(scratch); ++i) {
+		ret = regmap_read(dsp->regmap, addr + i, &scratch[i]);
+		if (ret) {
+			adsp_err(dsp, "Failed to read SCRATCH%u: %d\n", i, ret);
+			return;
+		}
 	}
 
 	adsp_dbg(dsp, "FW SCRATCH 0:0x%x 1:0x%x 2:0x%x 3:0x%x\n",
-		 be16_to_cpu(scratch[0]),
-		 be16_to_cpu(scratch[1]),
-		 be16_to_cpu(scratch[2]),
-		 be16_to_cpu(scratch[3]));
+		 scratch[0], scratch[1], scratch[2], scratch[3]);
 }
 
 static void wm_adsp2v2_show_fw_status(struct wm_adsp *dsp)
 {
-	u32 scratch[2];
+	unsigned int scratch[2];
 	int ret;
 
-	ret = regmap_raw_read(dsp->regmap, dsp->base + ADSP2V2_SCRATCH0_1,
-			      scratch, sizeof(scratch));
-
+	ret = regmap_read(dsp->regmap, dsp->base + ADSP2V2_SCRATCH0_1,
+			  &scratch[0]);
 	if (ret) {
-		adsp_err(dsp, "Failed to read SCRATCH regs: %d\n", ret);
+		adsp_err(dsp, "Failed to read SCRATCH0_1: %d\n", ret);
 		return;
 	}
 
-	scratch[0] = be32_to_cpu(scratch[0]);
-	scratch[1] = be32_to_cpu(scratch[1]);
+	ret = regmap_read(dsp->regmap, dsp->base + ADSP2V2_SCRATCH2_3,
+			  &scratch[1]);
+	if (ret) {
+		adsp_err(dsp, "Failed to read SCRATCH2_3: %d\n", ret);
+		return;
+	}
 
 	adsp_dbg(dsp, "FW SCRATCH 0:0x%x 1:0x%x 2:0x%x 3:0x%x\n",
 		 scratch[0] & 0xFFFF,
diff --git a/sound/soc/intel/skylake/skl.c b/sound/soc/intel/skylake/skl.c
index 1d17be0..50f16a0 100644
--- a/sound/soc/intel/skylake/skl.c
+++ b/sound/soc/intel/skylake/skl.c
@@ -752,6 +752,12 @@ static void skl_probe_work(struct work_struct *work)
 		}
 	}
 
+	/*
+	 * we are done probing so decrement link counts
+	 */
+	list_for_each_entry(hlink, &bus->hlink_list, list)
+		snd_hdac_ext_bus_link_put(bus, hlink);
+
 	if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)) {
 		err = snd_hdac_display_power(bus, false);
 		if (err < 0) {
@@ -761,12 +767,6 @@ static void skl_probe_work(struct work_struct *work)
 		}
 	}
 
-	/*
-	 * we are done probing so decrement link counts
-	 */
-	list_for_each_entry(hlink, &bus->hlink_list, list)
-		snd_hdac_ext_bus_link_put(bus, hlink);
-
 	/* configure PM */
 	pm_runtime_put_noidle(bus->dev);
 	pm_runtime_allow(bus->dev);
diff --git a/sound/soc/omap/omap-abe-twl6040.c b/sound/soc/omap/omap-abe-twl6040.c
index d5ae9eb..fed45b4 100644
--- a/sound/soc/omap/omap-abe-twl6040.c
+++ b/sound/soc/omap/omap-abe-twl6040.c
@@ -36,6 +36,8 @@
 #include "../codecs/twl6040.h"
 
 struct abe_twl6040 {
+	struct snd_soc_card card;
+	struct snd_soc_dai_link dai_links[2];
 	int	jack_detection;	/* board can detect jack events */
 	int	mclk_freq;	/* MCLK frequency speed for twl6040 */
 };
@@ -208,40 +210,10 @@ static int omap_abe_dmic_init(struct snd_soc_pcm_runtime *rtd)
 				ARRAY_SIZE(dmic_audio_map));
 }
 
-/* Digital audio interface glue - connects codec <--> CPU */
-static struct snd_soc_dai_link abe_twl6040_dai_links[] = {
-	{
-		.name = "TWL6040",
-		.stream_name = "TWL6040",
-		.codec_dai_name = "twl6040-legacy",
-		.codec_name = "twl6040-codec",
-		.init = omap_abe_twl6040_init,
-		.ops = &omap_abe_ops,
-	},
-	{
-		.name = "DMIC",
-		.stream_name = "DMIC Capture",
-		.codec_dai_name = "dmic-hifi",
-		.codec_name = "dmic-codec",
-		.init = omap_abe_dmic_init,
-		.ops = &omap_abe_dmic_ops,
-	},
-};
-
-/* Audio machine driver */
-static struct snd_soc_card omap_abe_card = {
-	.owner = THIS_MODULE,
-
-	.dapm_widgets = twl6040_dapm_widgets,
-	.num_dapm_widgets = ARRAY_SIZE(twl6040_dapm_widgets),
-	.dapm_routes = audio_map,
-	.num_dapm_routes = ARRAY_SIZE(audio_map),
-};
-
 static int omap_abe_probe(struct platform_device *pdev)
 {
 	struct device_node *node = pdev->dev.of_node;
-	struct snd_soc_card *card = &omap_abe_card;
+	struct snd_soc_card *card;
 	struct device_node *dai_node;
 	struct abe_twl6040 *priv;
 	int num_links = 0;
@@ -252,12 +224,18 @@ static int omap_abe_probe(struct platform_device *pdev)
 		return -ENODEV;
 	}
 
-	card->dev = &pdev->dev;
-
 	priv = devm_kzalloc(&pdev->dev, sizeof(struct abe_twl6040), GFP_KERNEL);
 	if (priv == NULL)
 		return -ENOMEM;
 
+	card = &priv->card;
+	card->dev = &pdev->dev;
+	card->owner = THIS_MODULE;
+	card->dapm_widgets = twl6040_dapm_widgets;
+	card->num_dapm_widgets = ARRAY_SIZE(twl6040_dapm_widgets);
+	card->dapm_routes = audio_map;
+	card->num_dapm_routes = ARRAY_SIZE(audio_map);
+
 	if (snd_soc_of_parse_card_name(card, "ti,model")) {
 		dev_err(&pdev->dev, "Card name is not provided\n");
 		return -ENODEV;
@@ -274,14 +252,27 @@ static int omap_abe_probe(struct platform_device *pdev)
 		dev_err(&pdev->dev, "McPDM node is not provided\n");
 		return -EINVAL;
 	}
-	abe_twl6040_dai_links[0].cpu_of_node = dai_node;
-	abe_twl6040_dai_links[0].platform_of_node = dai_node;
+
+	priv->dai_links[0].name = "TWL6040";
+	priv->dai_links[0].stream_name = "TWL6040";
+	priv->dai_links[0].cpu_of_node = dai_node;
+	priv->dai_links[0].platform_of_node = dai_node;
+	priv->dai_links[0].codec_dai_name = "twl6040-legacy";
+	priv->dai_links[0].codec_name = "twl6040-codec";
+	priv->dai_links[0].init = omap_abe_twl6040_init;
+	priv->dai_links[0].ops = &omap_abe_ops;
 
 	dai_node = of_parse_phandle(node, "ti,dmic", 0);
 	if (dai_node) {
 		num_links = 2;
-		abe_twl6040_dai_links[1].cpu_of_node = dai_node;
-		abe_twl6040_dai_links[1].platform_of_node = dai_node;
+		priv->dai_links[1].name = "DMIC";
+		priv->dai_links[1].stream_name = "DMIC Capture";
+		priv->dai_links[1].cpu_of_node = dai_node;
+		priv->dai_links[1].platform_of_node = dai_node;
+		priv->dai_links[1].codec_dai_name = "dmic-hifi";
+		priv->dai_links[1].codec_name = "dmic-codec";
+		priv->dai_links[1].init = omap_abe_dmic_init;
+		priv->dai_links[1].ops = &omap_abe_dmic_ops;
 	} else {
 		num_links = 1;
 	}
@@ -300,7 +291,7 @@ static int omap_abe_probe(struct platform_device *pdev)
 		return -ENODEV;
 	}
 
-	card->dai_link = abe_twl6040_dai_links;
+	card->dai_link = priv->dai_links;
 	card->num_links = num_links;
 
 	snd_soc_card_set_drvdata(card, priv);
diff --git a/sound/soc/omap/omap-dmic.c b/sound/soc/omap/omap-dmic.c
index fe96627..cba9645 100644
--- a/sound/soc/omap/omap-dmic.c
+++ b/sound/soc/omap/omap-dmic.c
@@ -48,6 +48,8 @@ struct omap_dmic {
 	struct device *dev;
 	void __iomem *io_base;
 	struct clk *fclk;
+	struct pm_qos_request pm_qos_req;
+	int latency;
 	int fclk_freq;
 	int out_freq;
 	int clk_div;
@@ -124,6 +126,8 @@ static void omap_dmic_dai_shutdown(struct snd_pcm_substream *substream,
 
 	mutex_lock(&dmic->mutex);
 
+	pm_qos_remove_request(&dmic->pm_qos_req);
+
 	if (!dai->active)
 		dmic->active = 0;
 
@@ -228,6 +232,8 @@ static int omap_dmic_dai_hw_params(struct snd_pcm_substream *substream,
 	/* packet size is threshold * channels */
 	dma_data = snd_soc_dai_get_dma_data(dai, substream);
 	dma_data->maxburst = dmic->threshold * channels;
+	dmic->latency = (OMAP_DMIC_THRES_MAX - dmic->threshold) * USEC_PER_SEC /
+			params_rate(params);
 
 	return 0;
 }
@@ -238,6 +244,9 @@ static int omap_dmic_dai_prepare(struct snd_pcm_substream *substream,
 	struct omap_dmic *dmic = snd_soc_dai_get_drvdata(dai);
 	u32 ctrl;
 
+	if (pm_qos_request_active(&dmic->pm_qos_req))
+		pm_qos_update_request(&dmic->pm_qos_req, dmic->latency);
+
 	/* Configure uplink threshold */
 	omap_dmic_write(dmic, OMAP_DMIC_FIFO_CTRL_REG, dmic->threshold);
 
diff --git a/sound/soc/omap/omap-mcbsp.c b/sound/soc/omap/omap-mcbsp.c
index d0ebb6b..2d6decb 100644
--- a/sound/soc/omap/omap-mcbsp.c
+++ b/sound/soc/omap/omap-mcbsp.c
@@ -308,9 +308,9 @@ static int omap_mcbsp_dai_hw_params(struct snd_pcm_substream *substream,
 			pkt_size = channels;
 		}
 
-		latency = ((((buffer_size - pkt_size) / channels) * 1000)
-				 / (params->rate_num / params->rate_den));
-
+		latency = (buffer_size - pkt_size) / channels;
+		latency = latency * USEC_PER_SEC /
+			  (params->rate_num / params->rate_den);
 		mcbsp->latency[substream->stream] = latency;
 
 		omap_mcbsp_set_threshold(substream, pkt_size);
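
The McBSP rewrite expresses the FIFO slack in microseconds:
latency_us = ((buffer_size - pkt_size) / channels) * USEC_PER_SEC / rate,
the longest the DMA may take to answer a request without an under- or
overrun. Worked through with made-up buffer numbers:

	#include <stdio.h>

	#define USEC_PER_SEC	1000000L

	int main(void)
	{
		long buffer_size = 1024, pkt_size = 256, channels = 2;
		long rate = 48000;	/* rate_num / rate_den */

		/* frames of slack in the FIFO, converted to microseconds */
		long latency = (buffer_size - pkt_size) / channels;

		latency = latency * USEC_PER_SEC / rate;
		printf("latency = %ld us\n", latency);	/* 8000 us */
		return 0;
	}
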
diff --git a/sound/soc/omap/omap-mcpdm.c b/sound/soc/omap/omap-mcpdm.c
index 4c1be36..7d5bdc5 100644
--- a/sound/soc/omap/omap-mcpdm.c
+++ b/sound/soc/omap/omap-mcpdm.c
@@ -54,6 +54,8 @@ struct omap_mcpdm {
 	unsigned long phys_base;
 	void __iomem *io_base;
 	int irq;
+	struct pm_qos_request pm_qos_req;
+	int latency[2];
 
 	struct mutex mutex;
 
@@ -277,6 +279,9 @@ static void omap_mcpdm_dai_shutdown(struct snd_pcm_substream *substream,
 				  struct snd_soc_dai *dai)
 {
 	struct omap_mcpdm *mcpdm = snd_soc_dai_get_drvdata(dai);
+	int tx = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK);
+	int stream1 = tx ? SNDRV_PCM_STREAM_PLAYBACK : SNDRV_PCM_STREAM_CAPTURE;
+	int stream2 = tx ? SNDRV_PCM_STREAM_CAPTURE : SNDRV_PCM_STREAM_PLAYBACK;
 
 	mutex_lock(&mcpdm->mutex);
 
@@ -289,6 +294,14 @@ static void omap_mcpdm_dai_shutdown(struct snd_pcm_substream *substream,
 		}
 	}
 
+	if (mcpdm->latency[stream2])
+		pm_qos_update_request(&mcpdm->pm_qos_req,
+				      mcpdm->latency[stream2]);
+	else if (mcpdm->latency[stream1])
+		pm_qos_remove_request(&mcpdm->pm_qos_req);
+
+	mcpdm->latency[stream1] = 0;
+
 	mutex_unlock(&mcpdm->mutex);
 }
 
@@ -300,7 +313,7 @@ static int omap_mcpdm_dai_hw_params(struct snd_pcm_substream *substream,
 	int stream = substream->stream;
 	struct snd_dmaengine_dai_dma_data *dma_data;
 	u32 threshold;
-	int channels;
+	int channels, latency;
 	int link_mask = 0;
 
 	channels = params_channels(params);
@@ -344,14 +357,25 @@ static int omap_mcpdm_dai_hw_params(struct snd_pcm_substream *substream,
 
 		dma_data->maxburst =
 				(MCPDM_DN_THRES_MAX - threshold) * channels;
+		latency = threshold;
 	} else {
 		/* If playback is not running assume a stereo stream to come */
 		if (!mcpdm->config[!stream].link_mask)
 			mcpdm->config[!stream].link_mask = (0x3 << 3);
 
 		dma_data->maxburst = threshold * channels;
+		latency = (MCPDM_DN_THRES_MAX - threshold);
 	}
 
+	/*
+	 * The DMA must service a DMA request within the latency time (usec)
+	 * to avoid under/overflow
+	 */
+	mcpdm->latency[stream] = latency * USEC_PER_SEC / params_rate(params);
+
+	if (!mcpdm->latency[stream])
+		mcpdm->latency[stream] = 10;
+
 	/* Check if we need to restart McPDM with this stream */
 	if (mcpdm->config[stream].link_mask &&
 	    mcpdm->config[stream].link_mask != link_mask)
@@ -366,6 +390,20 @@ static int omap_mcpdm_prepare(struct snd_pcm_substream *substream,
 				  struct snd_soc_dai *dai)
 {
 	struct omap_mcpdm *mcpdm = snd_soc_dai_get_drvdata(dai);
+	struct pm_qos_request *pm_qos_req = &mcpdm->pm_qos_req;
+	int tx = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK);
+	int stream1 = tx ? SNDRV_PCM_STREAM_PLAYBACK : SNDRV_PCM_STREAM_CAPTURE;
+	int stream2 = tx ? SNDRV_PCM_STREAM_CAPTURE : SNDRV_PCM_STREAM_PLAYBACK;
+	int latency = mcpdm->latency[stream2];
+
+	/* Prevent OMAP from hitting the OFF power state between FIFO fills */
+	if (!latency || mcpdm->latency[stream1] < latency)
+		latency = mcpdm->latency[stream1];
+
+	if (pm_qos_request_active(pm_qos_req))
+		pm_qos_update_request(pm_qos_req, latency);
+	else if (latency)
+		pm_qos_add_request(pm_qos_req, PM_QOS_CPU_DMA_LATENCY, latency);
 
 	if (!omap_mcpdm_active(mcpdm)) {
 		omap_mcpdm_start(mcpdm);
@@ -427,6 +465,9 @@ static int omap_mcpdm_remove(struct snd_soc_dai *dai)
 	free_irq(mcpdm->irq, (void *)mcpdm);
 	pm_runtime_disable(mcpdm->dev);
 
+	if (pm_qos_request_active(&mcpdm->pm_qos_req))
+		pm_qos_remove_request(&mcpdm->pm_qos_req);
+
 	return 0;
 }
 
diff --git a/sound/soc/qcom/common.c b/sound/soc/qcom/common.c
index eb1b9da..4715527 100644
--- a/sound/soc/qcom/common.c
+++ b/sound/soc/qcom/common.c
@@ -13,6 +13,7 @@ int qcom_snd_parse_of(struct snd_soc_card *card)
 	struct device_node *cpu = NULL;
 	struct device *dev = card->dev;
 	struct snd_soc_dai_link *link;
+	struct of_phandle_args args;
 	int ret, num_links;
 
 	ret = snd_soc_of_parse_card_name(card, "model");
@@ -47,12 +48,14 @@ int qcom_snd_parse_of(struct snd_soc_card *card)
 			goto err;
 		}
 
-		link->cpu_of_node = of_parse_phandle(cpu, "sound-dai", 0);
-		if (!link->cpu_of_node) {
+		ret = of_parse_phandle_with_args(cpu, "sound-dai",
+					"#sound-dai-cells", 0, &args);
+		if (ret) {
 			dev_err(card->dev, "error getting cpu phandle\n");
-			ret = -EINVAL;
 			goto err;
 		}
+		link->cpu_of_node = args.np;
+		link->id = args.args[0];
 
 		ret = snd_soc_of_get_dai_name(cpu, &link->cpu_dai_name);
 		if (ret) {
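The switch to of_parse_phandle_with_args() reads the DAI node and its cell argument in one step, which is where link->id now comes from. A minimal sketch of the call, assuming the usual #sound-dai-cells = <1> layout; the node and DAI names are illustrative.

#include <linux/of.h>
#include <linux/printk.h>

/* Resolve a 'sound-dai = <&dsp DAI_ID>;' reference. */
static int resolve_cpu_dai(struct device_node *cpu)
{
	struct of_phandle_args args;
	int ret;

	ret = of_parse_phandle_with_args(cpu, "sound-dai",
					 "#sound-dai-cells", 0, &args);
	if (ret)
		return ret;		/* missing or malformed phandle */

	pr_info("dai %pOF id %d\n", args.np, args.args[0]);
	of_node_put(args.np);		/* sketch only; the code above keeps the ref */
	return 0;
}
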
diff --git a/sound/soc/qcom/qdsp6/q6afe-dai.c b/sound/soc/qcom/qdsp6/q6afe-dai.c
index 60ff4a2..8f6c8fc 100644
--- a/sound/soc/qcom/qdsp6/q6afe-dai.c
+++ b/sound/soc/qcom/qdsp6/q6afe-dai.c
@@ -1112,204 +1112,204 @@ static int q6afe_of_xlate_dai_name(struct snd_soc_component *component,
 }
 
 static const struct snd_soc_dapm_widget q6afe_dai_widgets[] = {
-	SND_SOC_DAPM_AIF_OUT("HDMI_RX", "HDMI Playback", 0, 0, 0, 0),
-	SND_SOC_DAPM_AIF_OUT("SLIMBUS_0_RX", "Slimbus Playback", 0, 0, 0, 0),
-	SND_SOC_DAPM_AIF_OUT("SLIMBUS_1_RX", "Slimbus1 Playback", 0, 0, 0, 0),
-	SND_SOC_DAPM_AIF_OUT("SLIMBUS_2_RX", "Slimbus2 Playback", 0, 0, 0, 0),
-	SND_SOC_DAPM_AIF_OUT("SLIMBUS_3_RX", "Slimbus3 Playback", 0, 0, 0, 0),
-	SND_SOC_DAPM_AIF_OUT("SLIMBUS_4_RX", "Slimbus4 Playback", 0, 0, 0, 0),
-	SND_SOC_DAPM_AIF_OUT("SLIMBUS_5_RX", "Slimbus5 Playback", 0, 0, 0, 0),
-	SND_SOC_DAPM_AIF_OUT("SLIMBUS_6_RX", "Slimbus6 Playback", 0, 0, 0, 0),
-	SND_SOC_DAPM_AIF_IN("SLIMBUS_0_TX", "Slimbus Capture", 0, 0, 0, 0),
-	SND_SOC_DAPM_AIF_IN("SLIMBUS_1_TX", "Slimbus1 Capture", 0, 0, 0, 0),
-	SND_SOC_DAPM_AIF_IN("SLIMBUS_2_TX", "Slimbus2 Capture", 0, 0, 0, 0),
-	SND_SOC_DAPM_AIF_IN("SLIMBUS_3_TX", "Slimbus3 Capture", 0, 0, 0, 0),
-	SND_SOC_DAPM_AIF_IN("SLIMBUS_4_TX", "Slimbus4 Capture", 0, 0, 0, 0),
-	SND_SOC_DAPM_AIF_IN("SLIMBUS_5_TX", "Slimbus5 Capture", 0, 0, 0, 0),
-	SND_SOC_DAPM_AIF_IN("SLIMBUS_6_TX", "Slimbus6 Capture", 0, 0, 0, 0),
-	SND_SOC_DAPM_AIF_OUT("QUAT_MI2S_RX", "Quaternary MI2S Playback",
+	SND_SOC_DAPM_AIF_IN("HDMI_RX", NULL, 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("SLIMBUS_0_RX", NULL, 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("SLIMBUS_1_RX", NULL, 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("SLIMBUS_2_RX", NULL, 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("SLIMBUS_3_RX", NULL, 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("SLIMBUS_4_RX", NULL, 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("SLIMBUS_5_RX", NULL, 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("SLIMBUS_6_RX", NULL, 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("SLIMBUS_0_TX", NULL, 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("SLIMBUS_1_TX", NULL, 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("SLIMBUS_2_TX", NULL, 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("SLIMBUS_3_TX", NULL, 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("SLIMBUS_4_TX", NULL, 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("SLIMBUS_5_TX", NULL, 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("SLIMBUS_6_TX", NULL, 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("QUAT_MI2S_RX", NULL,
 						0, 0, 0, 0),
-	SND_SOC_DAPM_AIF_IN("QUAT_MI2S_TX", "Quaternary MI2S Capture",
+	SND_SOC_DAPM_AIF_OUT("QUAT_MI2S_TX", NULL,
 						0, 0, 0, 0),
-	SND_SOC_DAPM_AIF_OUT("TERT_MI2S_RX", "Tertiary MI2S Playback",
+	SND_SOC_DAPM_AIF_IN("TERT_MI2S_RX", NULL,
 						0, 0, 0, 0),
-	SND_SOC_DAPM_AIF_IN("TERT_MI2S_TX", "Tertiary MI2S Capture",
+	SND_SOC_DAPM_AIF_OUT("TERT_MI2S_TX", NULL,
 						0, 0, 0, 0),
-	SND_SOC_DAPM_AIF_OUT("SEC_MI2S_RX", "Secondary MI2S Playback",
+	SND_SOC_DAPM_AIF_IN("SEC_MI2S_RX", NULL,
 			     0, 0, 0, 0),
-	SND_SOC_DAPM_AIF_IN("SEC_MI2S_TX", "Secondary MI2S Capture",
+	SND_SOC_DAPM_AIF_OUT("SEC_MI2S_TX", NULL,
 						0, 0, 0, 0),
-	SND_SOC_DAPM_AIF_OUT("SEC_MI2S_RX_SD1",
+	SND_SOC_DAPM_AIF_IN("SEC_MI2S_RX_SD1",
 			"Secondary MI2S Playback SD1",
 			0, 0, 0, 0),
-	SND_SOC_DAPM_AIF_OUT("PRI_MI2S_RX", "Primary MI2S Playback",
+	SND_SOC_DAPM_AIF_IN("PRI_MI2S_RX", NULL,
 			     0, 0, 0, 0),
-	SND_SOC_DAPM_AIF_IN("PRI_MI2S_TX", "Primary MI2S Capture",
+	SND_SOC_DAPM_AIF_OUT("PRI_MI2S_TX", NULL,
 						0, 0, 0, 0),
 
-	SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_RX_0", "Primary TDM0 Playback",
+	SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_0", NULL,
 			     0, 0, 0, 0),
-	SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_RX_1", "Primary TDM1 Playback",
+	SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_1", NULL,
 			     0, 0, 0, 0),
-	SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_RX_2", "Primary TDM2 Playback",
+	SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_2", NULL,
 			     0, 0, 0, 0),
-	SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_RX_3", "Primary TDM3 Playback",
+	SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_3", NULL,
 			     0, 0, 0, 0),
-	SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_RX_4", "Primary TDM4 Playback",
+	SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_4", NULL,
 			     0, 0, 0, 0),
-	SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_RX_5", "Primary TDM5 Playback",
+	SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_5", NULL,
 			     0, 0, 0, 0),
-	SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_RX_6", "Primary TDM6 Playback",
+	SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_6", NULL,
 			     0, 0, 0, 0),
-	SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_RX_7", "Primary TDM7 Playback",
+	SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_7", NULL,
 			     0, 0, 0, 0),
-	SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_TX_0", "Primary TDM0 Capture",
+	SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_0", NULL,
 						0, 0, 0, 0),
-	SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_TX_1", "Primary TDM1 Capture",
+	SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_1", NULL,
 						0, 0, 0, 0),
-	SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_TX_2", "Primary TDM2 Capture",
+	SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_2", NULL,
 						0, 0, 0, 0),
-	SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_TX_3", "Primary TDM3 Capture",
+	SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_3", NULL,
 						0, 0, 0, 0),
-	SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_TX_4", "Primary TDM4 Capture",
+	SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_4", NULL,
 						0, 0, 0, 0),
-	SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_TX_5", "Primary TDM5 Capture",
+	SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_5", NULL,
 						0, 0, 0, 0),
-	SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_TX_6", "Primary TDM6 Capture",
+	SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_6", NULL,
 						0, 0, 0, 0),
-	SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_TX_7", "Primary TDM7 Capture",
+	SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_7", NULL,
 						0, 0, 0, 0),
 
-	SND_SOC_DAPM_AIF_OUT("SEC_TDM_RX_0", "Secondary TDM0 Playback",
+	SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_0", NULL,
 			     0, 0, 0, 0),
-	SND_SOC_DAPM_AIF_OUT("SEC_TDM_RX_1", "Secondary TDM1 Playback",
+	SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_1", NULL,
 			     0, 0, 0, 0),
-	SND_SOC_DAPM_AIF_OUT("SEC_TDM_RX_2", "Secondary TDM2 Playback",
+	SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_2", NULL,
 			     0, 0, 0, 0),
-	SND_SOC_DAPM_AIF_OUT("SEC_TDM_RX_3", "Secondary TDM3 Playback",
+	SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_3", NULL,
 			     0, 0, 0, 0),
-	SND_SOC_DAPM_AIF_OUT("SEC_TDM_RX_4", "Secondary TDM4 Playback",
+	SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_4", NULL,
 			     0, 0, 0, 0),
-	SND_SOC_DAPM_AIF_OUT("SEC_TDM_RX_5", "Secondary TDM5 Playback",
+	SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_5", NULL,
 			     0, 0, 0, 0),
-	SND_SOC_DAPM_AIF_OUT("SEC_TDM_RX_6", "Secondary TDM6 Playback",
+	SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_6", NULL,
 			     0, 0, 0, 0),
-	SND_SOC_DAPM_AIF_OUT("SEC_TDM_RX_7", "Secondary TDM7 Playback",
+	SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_7", NULL,
 			     0, 0, 0, 0),
-	SND_SOC_DAPM_AIF_IN("SEC_TDM_TX_0", "Secondary TDM0 Capture",
+	SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_0", NULL,
 						0, 0, 0, 0),
-	SND_SOC_DAPM_AIF_IN("SEC_TDM_TX_1", "Secondary TDM1 Capture",
+	SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_1", NULL,
 						0, 0, 0, 0),
-	SND_SOC_DAPM_AIF_IN("SEC_TDM_TX_2", "Secondary TDM2 Capture",
+	SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_2", NULL,
 						0, 0, 0, 0),
-	SND_SOC_DAPM_AIF_IN("SEC_TDM_TX_3", "Secondary TDM3 Capture",
+	SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_3", NULL,
 						0, 0, 0, 0),
-	SND_SOC_DAPM_AIF_IN("SEC_TDM_TX_4", "Secondary TDM4 Capture",
+	SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_4", NULL,
 						0, 0, 0, 0),
-	SND_SOC_DAPM_AIF_IN("SEC_TDM_TX_5", "Secondary TDM5 Capture",
+	SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_5", NULL,
 						0, 0, 0, 0),
-	SND_SOC_DAPM_AIF_IN("SEC_TDM_TX_6", "Secondary TDM6 Capture",
+	SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_6", NULL,
 						0, 0, 0, 0),
-	SND_SOC_DAPM_AIF_IN("SEC_TDM_TX_7", "Secondary TDM7 Capture",
+	SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_7", NULL,
 						0, 0, 0, 0),
 
-	SND_SOC_DAPM_AIF_OUT("TERT_TDM_RX_0", "Tertiary TDM0 Playback",
+	SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_0", NULL,
 			     0, 0, 0, 0),
-	SND_SOC_DAPM_AIF_OUT("TERT_TDM_RX_1", "Tertiary TDM1 Playback",
+	SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_1", NULL,
 			     0, 0, 0, 0),
-	SND_SOC_DAPM_AIF_OUT("TERT_TDM_RX_2", "Tertiary TDM2 Playback",
+	SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_2", NULL,
 			     0, 0, 0, 0),
-	SND_SOC_DAPM_AIF_OUT("TERT_TDM_RX_3", "Tertiary TDM3 Playback",
+	SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_3", NULL,
 			     0, 0, 0, 0),
-	SND_SOC_DAPM_AIF_OUT("TERT_TDM_RX_4", "Tertiary TDM4 Playback",
+	SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_4", NULL,
 			     0, 0, 0, 0),
-	SND_SOC_DAPM_AIF_OUT("TERT_TDM_RX_5", "Tertiary TDM5 Playback",
+	SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_5", NULL,
 			     0, 0, 0, 0),
-	SND_SOC_DAPM_AIF_OUT("TERT_TDM_RX_6", "Tertiary TDM6 Playback",
+	SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_6", NULL,
 			     0, 0, 0, 0),
-	SND_SOC_DAPM_AIF_OUT("TERT_TDM_RX_7", "Tertiary TDM7 Playback",
+	SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_7", NULL,
 			     0, 0, 0, 0),
-	SND_SOC_DAPM_AIF_IN("TERT_TDM_TX_0", "Tertiary TDM0 Capture",
+	SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_0", NULL,
 						0, 0, 0, 0),
-	SND_SOC_DAPM_AIF_IN("TERT_TDM_TX_1", "Tertiary TDM1 Capture",
+	SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_1", NULL,
 						0, 0, 0, 0),
-	SND_SOC_DAPM_AIF_IN("TERT_TDM_TX_2", "Tertiary TDM2 Capture",
+	SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_2", NULL,
 						0, 0, 0, 0),
-	SND_SOC_DAPM_AIF_IN("TERT_TDM_TX_3", "Tertiary TDM3 Capture",
+	SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_3", NULL,
 						0, 0, 0, 0),
-	SND_SOC_DAPM_AIF_IN("TERT_TDM_TX_4", "Tertiary TDM4 Capture",
+	SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_4", NULL,
 						0, 0, 0, 0),
-	SND_SOC_DAPM_AIF_IN("TERT_TDM_TX_5", "Tertiary TDM5 Capture",
+	SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_5", NULL,
 						0, 0, 0, 0),
-	SND_SOC_DAPM_AIF_IN("TERT_TDM_TX_6", "Tertiary TDM6 Capture",
+	SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_6", NULL,
 						0, 0, 0, 0),
-	SND_SOC_DAPM_AIF_IN("TERT_TDM_TX_7", "Tertiary TDM7 Capture",
+	SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_7", NULL,
 						0, 0, 0, 0),
 
-	SND_SOC_DAPM_AIF_OUT("QUAT_TDM_RX_0", "Quaternary TDM0 Playback",
+	SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_0", NULL,
 			     0, 0, 0, 0),
-	SND_SOC_DAPM_AIF_OUT("QUAT_TDM_RX_1", "Quaternary TDM1 Playback",
+	SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_1", NULL,
 			     0, 0, 0, 0),
-	SND_SOC_DAPM_AIF_OUT("QUAT_TDM_RX_2", "Quaternary TDM2 Playback",
+	SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_2", NULL,
 			     0, 0, 0, 0),
-	SND_SOC_DAPM_AIF_OUT("QUAT_TDM_RX_3", "Quaternary TDM3 Playback",
+	SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_3", NULL,
 			     0, 0, 0, 0),
-	SND_SOC_DAPM_AIF_OUT("QUAT_TDM_RX_4", "Quaternary TDM4 Playback",
+	SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_4", NULL,
 			     0, 0, 0, 0),
-	SND_SOC_DAPM_AIF_OUT("QUAT_TDM_RX_5", "Quaternary TDM5 Playback",
+	SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_5", NULL,
 			     0, 0, 0, 0),
-	SND_SOC_DAPM_AIF_OUT("QUAT_TDM_RX_6", "Quaternary TDM6 Playback",
+	SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_6", NULL,
 			     0, 0, 0, 0),
-	SND_SOC_DAPM_AIF_OUT("QUAT_TDM_RX_7", "Quaternary TDM7 Playback",
+	SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_7", NULL,
 			     0, 0, 0, 0),
-	SND_SOC_DAPM_AIF_IN("QUAT_TDM_TX_0", "Quaternary TDM0 Capture",
+	SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_0", NULL,
 						0, 0, 0, 0),
-	SND_SOC_DAPM_AIF_IN("QUAT_TDM_TX_1", "Quaternary TDM1 Capture",
+	SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_1", NULL,
 						0, 0, 0, 0),
-	SND_SOC_DAPM_AIF_IN("QUAT_TDM_TX_2", "Quaternary TDM2 Capture",
+	SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_2", NULL,
 						0, 0, 0, 0),
-	SND_SOC_DAPM_AIF_IN("QUAT_TDM_TX_3", "Quaternary TDM3 Capture",
+	SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_3", NULL,
 						0, 0, 0, 0),
-	SND_SOC_DAPM_AIF_IN("QUAT_TDM_TX_4", "Quaternary TDM4 Capture",
+	SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_4", NULL,
 						0, 0, 0, 0),
-	SND_SOC_DAPM_AIF_IN("QUAT_TDM_TX_5", "Quaternary TDM5 Capture",
+	SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_5", NULL,
 						0, 0, 0, 0),
-	SND_SOC_DAPM_AIF_IN("QUAT_TDM_TX_6", "Quaternary TDM6 Capture",
+	SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_6", NULL,
 						0, 0, 0, 0),
-	SND_SOC_DAPM_AIF_IN("QUAT_TDM_TX_7", "Quaternary TDM7 Capture",
+	SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_7", NULL,
 						0, 0, 0, 0),
 
-	SND_SOC_DAPM_AIF_OUT("QUIN_TDM_RX_0", "Quinary TDM0 Playback",
+	SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_0", NULL,
 			     0, 0, 0, 0),
-	SND_SOC_DAPM_AIF_OUT("QUIN_TDM_RX_1", "Quinary TDM1 Playback",
+	SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_1", NULL,
 			     0, 0, 0, 0),
-	SND_SOC_DAPM_AIF_OUT("QUIN_TDM_RX_2", "Quinary TDM2 Playback",
+	SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_2", NULL,
 			     0, 0, 0, 0),
-	SND_SOC_DAPM_AIF_OUT("QUIN_TDM_RX_3", "Quinary TDM3 Playback",
+	SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_3", NULL,
 			     0, 0, 0, 0),
-	SND_SOC_DAPM_AIF_OUT("QUIN_TDM_RX_4", "Quinary TDM4 Playback",
+	SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_4", NULL,
 			     0, 0, 0, 0),
-	SND_SOC_DAPM_AIF_OUT("QUIN_TDM_RX_5", "Quinary TDM5 Playback",
+	SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_5", NULL,
 			     0, 0, 0, 0),
-	SND_SOC_DAPM_AIF_OUT("QUIN_TDM_RX_6", "Quinary TDM6 Playback",
+	SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_6", NULL,
 			     0, 0, 0, 0),
-	SND_SOC_DAPM_AIF_OUT("QUIN_TDM_RX_7", "Quinary TDM7 Playback",
+	SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_7", NULL,
 			     0, 0, 0, 0),
-	SND_SOC_DAPM_AIF_IN("QUIN_TDM_TX_0", "Quinary TDM0 Capture",
+	SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_0", NULL,
 						0, 0, 0, 0),
-	SND_SOC_DAPM_AIF_IN("QUIN_TDM_TX_1", "Quinary TDM1 Capture",
+	SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_1", NULL,
 						0, 0, 0, 0),
-	SND_SOC_DAPM_AIF_IN("QUIN_TDM_TX_2", "Quinary TDM2 Capture",
+	SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_2", NULL,
 						0, 0, 0, 0),
-	SND_SOC_DAPM_AIF_IN("QUIN_TDM_TX_3", "Quinary TDM3 Capture",
+	SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_3", NULL,
 						0, 0, 0, 0),
-	SND_SOC_DAPM_AIF_IN("QUIN_TDM_TX_4", "Quinary TDM4 Capture",
+	SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_4", NULL,
 						0, 0, 0, 0),
-	SND_SOC_DAPM_AIF_IN("QUIN_TDM_TX_5", "Quinary TDM5 Capture",
+	SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_5", NULL,
 						0, 0, 0, 0),
-	SND_SOC_DAPM_AIF_IN("QUIN_TDM_TX_6", "Quinary TDM6 Capture",
+	SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_6", NULL,
 						0, 0, 0, 0),
-	SND_SOC_DAPM_AIF_IN("QUIN_TDM_TX_7", "Quinary TDM7 Capture",
+	SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_7", NULL,
 						0, 0, 0, 0),
 };
 
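The wholesale IN/OUT swap above follows the DAPM convention that direction is named from the component's own perspective: an RX port carries data into the DSP, so it is an AIF_IN widget, and a TX port is an AIF_OUT. Clearing the stream names to NULL detaches the widgets from PCM streams so they are connected through explicit DAPM routes instead. An illustrative widget/route pair, names invented:

#include <sound/soc-dapm.h>

static const struct snd_soc_dapm_widget example_widgets[] = {
	/* Stream-less AIF widget: no "Playback"/"Capture" binding. */
	SND_SOC_DAPM_AIF_IN("PORT_RX", NULL, 0, 0, 0, 0),
};

static const struct snd_soc_dapm_route example_routes[] = {
	/* { sink, control, source }: wire the port up explicitly. */
	{ "PORT_RX", NULL, "Port Playback" },
};
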
diff --git a/sound/soc/qcom/qdsp6/q6afe.c b/sound/soc/qcom/qdsp6/q6afe.c
index 000775b..829b5e987 100644
--- a/sound/soc/qcom/qdsp6/q6afe.c
+++ b/sound/soc/qcom/qdsp6/q6afe.c
@@ -49,14 +49,14 @@
 #define AFE_PORT_I2S_SD1		0x2
 #define AFE_PORT_I2S_SD2		0x3
 #define AFE_PORT_I2S_SD3		0x4
-#define AFE_PORT_I2S_SD0_MASK		BIT(0x1)
-#define AFE_PORT_I2S_SD1_MASK		BIT(0x2)
-#define AFE_PORT_I2S_SD2_MASK		BIT(0x3)
-#define AFE_PORT_I2S_SD3_MASK		BIT(0x4)
-#define AFE_PORT_I2S_SD0_1_MASK		GENMASK(2, 1)
-#define AFE_PORT_I2S_SD2_3_MASK		GENMASK(4, 3)
-#define AFE_PORT_I2S_SD0_1_2_MASK	GENMASK(3, 1)
-#define AFE_PORT_I2S_SD0_1_2_3_MASK	GENMASK(4, 1)
+#define AFE_PORT_I2S_SD0_MASK		BIT(0x0)
+#define AFE_PORT_I2S_SD1_MASK		BIT(0x1)
+#define AFE_PORT_I2S_SD2_MASK		BIT(0x2)
+#define AFE_PORT_I2S_SD3_MASK		BIT(0x3)
+#define AFE_PORT_I2S_SD0_1_MASK		GENMASK(1, 0)
+#define AFE_PORT_I2S_SD2_3_MASK		GENMASK(3, 2)
+#define AFE_PORT_I2S_SD0_1_2_MASK	GENMASK(2, 0)
+#define AFE_PORT_I2S_SD0_1_2_3_MASK	GENMASK(3, 0)
 #define AFE_PORT_I2S_QUAD01		0x5
 #define AFE_PORT_I2S_QUAD23		0x6
 #define AFE_PORT_I2S_6CHS		0x7
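The mask fix is an off-by-one in disguise: BIT(n) expands to 1UL << n, so BIT(0x1) selects serial-data line 1 rather than line 0, and GENMASK(h, l) covers bits l through h inclusive. A standalone userspace check of the corrected values, with the two macros re-derived on the assumption of a 64-bit unsigned long:

#include <stdio.h>

#define BIT(n)		(1UL << (n))
#define GENMASK(h, l)	(((~0UL) << (l)) & (~0UL >> (63 - (h))))

int main(void)
{
	printf("SD0       = %#lx\n", BIT(0));		/* 0x1; was BIT(1) = 0x2 */
	printf("SD0_1     = %#lx\n", GENMASK(1, 0));	/* 0x3 */
	printf("SD0_1_2_3 = %#lx\n", GENMASK(3, 0));	/* 0xf */
	return 0;
}
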
diff --git a/sound/soc/rockchip/rockchip_pcm.c b/sound/soc/rockchip/rockchip_pcm.c
index f775383..7029e0b 100644
--- a/sound/soc/rockchip/rockchip_pcm.c
+++ b/sound/soc/rockchip/rockchip_pcm.c
@@ -32,6 +32,7 @@ static const struct snd_pcm_hardware snd_rockchip_hardware = {
 
 static const struct snd_dmaengine_pcm_config rk_dmaengine_pcm_config = {
 	.pcm_hardware = &snd_rockchip_hardware,
+	.prepare_slave_config = snd_dmaengine_pcm_prepare_slave_config,
 	.prealloc_buffer_size = 32 * 1024,
 };
 
diff --git a/sound/soc/sh/rcar/ssi.c b/sound/soc/sh/rcar/ssi.c
index 3f880ec..a566dae 100644
--- a/sound/soc/sh/rcar/ssi.c
+++ b/sound/soc/sh/rcar/ssi.c
@@ -283,7 +283,7 @@ static int rsnd_ssi_master_clk_start(struct rsnd_mod *mod,
 	if (rsnd_ssi_is_multi_slave(mod, io))
 		return 0;
 
-	if (ssi->rate) {
+	if (ssi->usrcnt > 1) {
 		if (ssi->rate != rate) {
 			dev_err(dev, "SSI parent/child should use same rate\n");
 			return -EINVAL;
diff --git a/sound/soc/soc-acpi.c b/sound/soc/soc-acpi.c
index b8e72b5..4fb29f0 100644
--- a/sound/soc/soc-acpi.c
+++ b/sound/soc/soc-acpi.c
@@ -10,11 +10,17 @@ struct snd_soc_acpi_mach *
 snd_soc_acpi_find_machine(struct snd_soc_acpi_mach *machines)
 {
 	struct snd_soc_acpi_mach *mach;
+	struct snd_soc_acpi_mach *mach_alt;
 
 	for (mach = machines; mach->id[0]; mach++) {
 		if (acpi_dev_present(mach->id, NULL, -1)) {
-			if (mach->machine_quirk)
-				mach = mach->machine_quirk(mach);
+			if (mach->machine_quirk) {
+				mach_alt = mach->machine_quirk(mach);
+				if (!mach_alt)
+					continue; /* not a full match, ignore */
+				mach = mach_alt;
+			}
+
 			return mach;
 		}
 	}
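With this change a machine_quirk callback may return NULL to signal "device present, but not a complete match", and the scan simply continues with the next table entry instead of dereferencing NULL. A sketch of a callback written against that contract; the DMI table and names are hypothetical:

#include <linux/dmi.h>
#include <sound/soc-acpi.h>

static const struct dmi_system_id example_dmi_table[] = {
	{ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "ExampleVendor") } },
	{ }
};

static struct snd_soc_acpi_mach *example_quirk(void *arg)
{
	struct snd_soc_acpi_mach *mach = arg;

	if (!dmi_check_system(example_dmi_table))
		return NULL;	/* not a full match: let the scan move on */

	return mach;
}
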
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 0712d85..727a1b4 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -2143,6 +2143,7 @@ static int snd_soc_instantiate_card(struct snd_soc_card *card)
 	}
 
 	card->instantiated = 1;
+	dapm_mark_endpoints_dirty(card);
 	snd_soc_dapm_sync(&card->dapm);
 	mutex_unlock(&card->mutex);
 	mutex_unlock(&client_mutex);
diff --git a/sound/soc/sunxi/sun8i-codec.c b/sound/soc/sunxi/sun8i-codec.c
index fb37dd9..bf615fa 100644
--- a/sound/soc/sunxi/sun8i-codec.c
+++ b/sound/soc/sunxi/sun8i-codec.c
@@ -589,16 +589,10 @@ static int sun8i_codec_probe(struct platform_device *pdev)
 
 static int sun8i_codec_remove(struct platform_device *pdev)
 {
-	struct snd_soc_card *card = platform_get_drvdata(pdev);
-	struct sun8i_codec *scodec = snd_soc_card_get_drvdata(card);
-
 	pm_runtime_disable(&pdev->dev);
 	if (!pm_runtime_status_suspended(&pdev->dev))
 		sun8i_codec_runtime_suspend(&pdev->dev);
 
-	clk_disable_unprepare(scodec->clk_module);
-	clk_disable_unprepare(scodec->clk_bus);
-
 	return 0;
 }
 
diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
index 08aa780..1c73b9e 100644
--- a/sound/usb/quirks-table.h
+++ b/sound/usb/quirks-table.h
@@ -3387,5 +3387,15 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
 		.ifnum = QUIRK_NO_INTERFACE
 	}
 },
+/* Dell WD19 Dock */
+{
+	USB_DEVICE(0x0bda, 0x402e),
+	.driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
+		.vendor_name = "Dell",
+		.product_name = "WD19 Dock",
+		.profile_name = "Dell-WD15-Dock",
+		.ifnum = QUIRK_NO_INTERFACE
+	}
+},
 
 #undef USB_DEVICE_VENDOR_SPEC
diff --git a/tools/bpf/bpftool/common.c b/tools/bpf/bpftool/common.c
index b3a0709..fcaf006 100644
--- a/tools/bpf/bpftool/common.c
+++ b/tools/bpf/bpftool/common.c
@@ -304,7 +304,7 @@ char *get_fdinfo(int fd, const char *key)
 		return NULL;
 	}
 
-	while ((n = getline(&line, &line_n, fdi))) {
+	while ((n = getline(&line, &line_n, fdi)) > 0) {
 		char *value;
 		int len;
 
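getline(3) returns the number of bytes read, which is at least 1 for any line it delivers, and -1 at end of file or on error. Since -1 is truthy, the unfixed while ((n = getline(...))) loop can never terminate at EOF; comparing against > 0 does. A standalone illustration:

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	char *line = NULL;
	size_t line_n = 0;
	ssize_t n;

	/* Terminates cleanly: n == -1 at EOF fails the "> 0" test. */
	while ((n = getline(&line, &line_n, stdin)) > 0)
		printf("read %zd bytes\n", n);

	free(line);	/* getline's buffer is heap-allocated */
	return 0;
}
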
diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
index dce960d..0de024a 100644
--- a/tools/bpf/bpftool/prog.c
+++ b/tools/bpf/bpftool/prog.c
@@ -749,6 +749,7 @@ static int do_load(int argc, char **argv)
 			}
 			NEXT_ARG();
 		} else if (is_prefix(*argv, "map")) {
+			void *new_map_replace;
 			char *endptr, *name;
 			int fd;
 
@@ -782,12 +783,15 @@ static int do_load(int argc, char **argv)
 			if (fd < 0)
 				goto err_free_reuse_maps;
 
-			map_replace = reallocarray(map_replace, old_map_fds + 1,
-						   sizeof(*map_replace));
-			if (!map_replace) {
+			new_map_replace = reallocarray(map_replace,
+						       old_map_fds + 1,
+						       sizeof(*map_replace));
+			if (!new_map_replace) {
 				p_err("mem alloc failed");
 				goto err_free_reuse_maps;
 			}
+			map_replace = new_map_replace;
+
 			map_replace[old_map_fds].idx = idx;
 			map_replace[old_map_fds].name = name;
 			map_replace[old_map_fds].fd = fd;
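This is the textbook realloc leak: writing the result straight back into the only pointer means a NULL return from a failed reallocarray() orphans the original block. A generic sketch of the safe pattern; grow() is a hypothetical helper, and reallocarray(3) needs _GNU_SOURCE on glibc:

#define _GNU_SOURCE
#include <stdlib.h>

/* Grow an int array by one element without leaking on failure. */
static int grow(int **arr, size_t *cap)
{
	void *tmp = reallocarray(*arr, *cap + 1, sizeof(**arr));

	if (!tmp)
		return -1;	/* *arr is still valid and still owned */

	*arr = tmp;
	(*cap)++;
	return 0;
}
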
diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c
index 7ec85d5..b75d004 100644
--- a/tools/objtool/elf.c
+++ b/tools/objtool/elf.c
@@ -31,6 +31,8 @@
 #include "elf.h"
 #include "warn.h"
 
+#define MAX_NAME_LEN 128
+
 struct section *find_section_by_name(struct elf *elf, const char *name)
 {
 	struct section *sec;
@@ -298,6 +300,8 @@ static int read_symbols(struct elf *elf)
 	/* Create parent/child links for any cold subfunctions */
 	list_for_each_entry(sec, &elf->sections, list) {
 		list_for_each_entry(sym, &sec->symbol_list, list) {
+			char pname[MAX_NAME_LEN + 1];
+			size_t pnamelen;
 			if (sym->type != STT_FUNC)
 				continue;
 			sym->pfunc = sym->cfunc = sym;
@@ -305,14 +309,21 @@ static int read_symbols(struct elf *elf)
 			if (!coldstr)
 				continue;
 
-			coldstr[0] = '\0';
-			pfunc = find_symbol_by_name(elf, sym->name);
-			coldstr[0] = '.';
+			pnamelen = coldstr - sym->name;
+			if (pnamelen > MAX_NAME_LEN) {
+				WARN("%s(): parent function name exceeds maximum length of %d characters",
+				     sym->name, MAX_NAME_LEN);
+				return -1;
+			}
+
+			strncpy(pname, sym->name, pnamelen);
+			pname[pnamelen] = '\0';
+			pfunc = find_symbol_by_name(elf, pname);
 
 			if (!pfunc) {
 				WARN("%s(): can't find parent function",
 				     sym->name);
-				goto err;
+				return -1;
 			}
 
 			sym->pfunc = pfunc;
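The old lookup briefly wrote a NUL into the shared symbol-name string (which lives in the ELF string table) to form the parent's name; the fix copies the prefix into a bounded stack buffer instead. The same idiom in standalone form:

#include <stdio.h>
#include <string.h>

#define MAX_NAME_LEN 128

int main(void)
{
	const char *name = "parse_args.cold.17";	/* example symbol */
	const char *coldstr = strstr(name, ".cold.");
	char pname[MAX_NAME_LEN + 1];
	size_t pnamelen;

	if (!coldstr)
		return 1;

	pnamelen = coldstr - name;
	if (pnamelen > MAX_NAME_LEN)
		return 1;	/* refuse oversized names, as objtool now does */

	strncpy(pname, name, pnamelen);
	pname[pnamelen] = '\0';		/* strncpy does not terminate here */
	printf("parent: %s\n", pname);	/* "parse_args" */
	return 0;
}
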
diff --git a/tools/perf/tests/attr/base-record b/tools/perf/tests/attr/base-record
index 3794066..efd0157 100644
--- a/tools/perf/tests/attr/base-record
+++ b/tools/perf/tests/attr/base-record
@@ -9,7 +9,7 @@
 config=0
 sample_period=*
 sample_type=263
-read_format=0
+read_format=0|4
 disabled=1
 inherit=1
 pinned=0
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 03a7231..e7dbdcc 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -1088,7 +1088,7 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts,
 		attr->exclude_user   = 1;
 	}
 
-	if (evsel->own_cpus)
+	if (evsel->own_cpus || evsel->unit)
 		evsel->attr.read_format |= PERF_FORMAT_ID;
 
 	/*
diff --git a/tools/perf/util/namespaces.c b/tools/perf/util/namespaces.c
index cf8bd12..aed170b 100644
--- a/tools/perf/util/namespaces.c
+++ b/tools/perf/util/namespaces.c
@@ -18,6 +18,7 @@
 #include <stdio.h>
 #include <string.h>
 #include <unistd.h>
+#include <asm/bug.h>
 
 struct namespaces *namespaces__new(struct namespaces_event *event)
 {
@@ -186,6 +187,7 @@ void nsinfo__mountns_enter(struct nsinfo *nsi,
 	char curpath[PATH_MAX];
 	int oldns = -1;
 	int newns = -1;
+	char *oldcwd = NULL;
 
 	if (nc == NULL)
 		return;
@@ -199,9 +201,13 @@ void nsinfo__mountns_enter(struct nsinfo *nsi,
 	if (snprintf(curpath, PATH_MAX, "/proc/self/ns/mnt") >= PATH_MAX)
 		return;
 
+	oldcwd = get_current_dir_name();
+	if (!oldcwd)
+		return;
+
 	oldns = open(curpath, O_RDONLY);
 	if (oldns < 0)
-		return;
+		goto errout;
 
 	newns = open(nsi->mntns_path, O_RDONLY);
 	if (newns < 0)
@@ -210,11 +216,13 @@ void nsinfo__mountns_enter(struct nsinfo *nsi,
 	if (setns(newns, CLONE_NEWNS) < 0)
 		goto errout;
 
+	nc->oldcwd = oldcwd;
 	nc->oldns = oldns;
 	nc->newns = newns;
 	return;
 
 errout:
+	free(oldcwd);
 	if (oldns > -1)
 		close(oldns);
 	if (newns > -1)
@@ -223,11 +231,16 @@ void nsinfo__mountns_enter(struct nsinfo *nsi,
 
 void nsinfo__mountns_exit(struct nscookie *nc)
 {
-	if (nc == NULL || nc->oldns == -1 || nc->newns == -1)
+	if (nc == NULL || nc->oldns == -1 || nc->newns == -1 || !nc->oldcwd)
 		return;
 
 	setns(nc->oldns, CLONE_NEWNS);
 
+	if (nc->oldcwd) {
+		WARN_ON_ONCE(chdir(nc->oldcwd));
+		zfree(&nc->oldcwd);
+	}
+
 	if (nc->oldns > -1) {
 		close(nc->oldns);
 		nc->oldns = -1;
diff --git a/tools/perf/util/namespaces.h b/tools/perf/util/namespaces.h
index cae1a9a..d5f46c0 100644
--- a/tools/perf/util/namespaces.h
+++ b/tools/perf/util/namespaces.h
@@ -38,6 +38,7 @@ struct nsinfo {
 struct nscookie {
 	int			oldns;
 	int			newns;
+	char			*oldcwd;
 };
 
 int nsinfo__init(struct nsinfo *nsi);
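Background for the pair of hunks above: setns(fd, CLONE_NEWNS) moves the calling thread into the target mount namespace and implicitly resets its working directory, so perf now records the cwd before switching and chdir()s back on the way out. A trimmed sketch of that save/restore pattern, assuming new_ns_fd and old_ns_fd are open /proc/<pid>/ns/mnt descriptors:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static int enter_mntns(int new_ns_fd, char **saved_cwd)
{
	*saved_cwd = get_current_dir_name();	/* malloc'd; freed on exit */
	if (!*saved_cwd)
		return -1;

	if (setns(new_ns_fd, CLONE_NEWNS) < 0) {	/* clobbers the cwd */
		free(*saved_cwd);
		*saved_cwd = NULL;
		return -1;
	}
	return 0;
}

static void exit_mntns(int old_ns_fd, char *saved_cwd)
{
	setns(old_ns_fd, CLONE_NEWNS);
	if (chdir(saved_cwd) < 0)	/* perf WARNs once if this fails */
		perror("chdir");
	free(saved_cwd);
}
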
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
index f1fe492..f0017c8 100644
--- a/tools/testing/selftests/Makefile
+++ b/tools/testing/selftests/Makefile
@@ -24,6 +24,7 @@
 TARGETS += mount
 TARGETS += mqueue
 TARGETS += net
+TARGETS += netfilter
 TARGETS += nsfs
 TARGETS += powerpc
 TARGETS += proc
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index 67c412d..2bde9ee 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -12511,6 +12511,25 @@ static struct bpf_test tests[] = {
 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
 		.result = ACCEPT,
 	},
+	{
+		"calls: ctx read at start of subprog",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
+			BPF_JMP_REG(BPF_JSGT, BPF_REG_0, BPF_REG_0, 0),
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+			BPF_EXIT_INSN(),
+			BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_1, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
+		.errstr_unpriv = "function calls to other bpf functions are allowed for root only",
+		.result_unpriv = REJECT,
+		.result = ACCEPT,
+	},
 };
 
 static int probe_filter_length(const struct bpf_insn *fp)
diff --git a/tools/testing/selftests/netfilter/Makefile b/tools/testing/selftests/netfilter/Makefile
new file mode 100644
index 0000000..47ed6ce
--- /dev/null
+++ b/tools/testing/selftests/netfilter/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+# Makefile for netfilter selftests
+
+TEST_PROGS := nft_trans_stress.sh
+
+include ../lib.mk
diff --git a/tools/testing/selftests/netfilter/config b/tools/testing/selftests/netfilter/config
new file mode 100644
index 0000000..1017313
--- /dev/null
+++ b/tools/testing/selftests/netfilter/config
@@ -0,0 +1,2 @@
+CONFIG_NET_NS=y
+CONFIG_NF_TABLES_INET=y
diff --git a/tools/testing/selftests/netfilter/nft_trans_stress.sh b/tools/testing/selftests/netfilter/nft_trans_stress.sh
new file mode 100755
index 0000000..f1affd1
--- /dev/null
+++ b/tools/testing/selftests/netfilter/nft_trans_stress.sh
@@ -0,0 +1,78 @@
+#!/bin/bash
+#
+# This test stress-tests the nf_tables config plane path vs. packet
+# path processing: make sure we never release rules that are still
+# visible to other CPUs.
+#
+# set -e
+
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
+testns=testns1
+tables="foo bar baz quux"
+
+nft --version > /dev/null 2>&1
+if [ $? -ne 0 ];then
+	echo "SKIP: Could not run test without nft tool"
+	exit $ksft_skip
+fi
+
+ip -Version > /dev/null 2>&1
+if [ $? -ne 0 ];then
+	echo "SKIP: Could not run test without ip tool"
+	exit $ksft_skip
+fi
+
+tmp=$(mktemp)
+
+for table in $tables; do
+	echo add table inet "$table" >> "$tmp"
+	echo flush table inet "$table" >> "$tmp"
+
+	echo "add chain inet $table INPUT { type filter hook input priority 0; }" >> "$tmp"
+	echo "add chain inet $table OUTPUT { type filter hook output priority 0; }" >> "$tmp"
+	for c in $(seq 1 400); do
+		chain=$(printf "chain%03u" "$c")
+		echo "add chain inet $table $chain" >> "$tmp"
+	done
+
+	for c in $(seq 1 400); do
+		chain=$(printf "chain%03u" "$c")
+		for BASE in INPUT OUTPUT; do
+			echo "add rule inet $table $BASE counter jump $chain" >> "$tmp"
+		done
+		echo "add rule inet $table $chain counter return" >> "$tmp"
+	done
+done
+
+ip netns add "$testns"
+ip -netns "$testns" link set lo up
+
+lscpu | grep ^CPU\(s\): | ( read cpu cpunum ;
+cpunum=$((cpunum-1))
+for i in $(seq 0 $cpunum);do
+	mask=$(printf 0x%x $((1<<$i)))
+	ip netns exec "$testns" taskset $mask ping -4 127.0.0.1 -fq > /dev/null &
+	ip netns exec "$testns" taskset $mask ping -6 ::1 -fq > /dev/null &
+done)
+
+sleep 1
+
+for i in $(seq 1 10) ; do ip netns exec "$testns" nft -f "$tmp" & done
+
+for table in $tables;do
+	randsleep=$((RANDOM%10))
+	sleep $randsleep
+	ip netns exec "$testns" nft delete table inet $table 2>/dev/null
+done
+
+randsleep=$((RANDOM%10))
+sleep $randsleep
+
+pkill -9 ping
+
+wait
+
+rm -f "$tmp"
+ip netns del "$testns"
diff --git a/tools/testing/selftests/proc/proc-self-map-files-002.c b/tools/testing/selftests/proc/proc-self-map-files-002.c
index 6f1f4a6..85744425 100644
--- a/tools/testing/selftests/proc/proc-self-map-files-002.c
+++ b/tools/testing/selftests/proc/proc-self-map-files-002.c
@@ -13,7 +13,7 @@
  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */
-/* Test readlink /proc/self/map_files/... with address 0. */
+/* Test readlink /proc/self/map_files/... with minimum address. */
 #include <errno.h>
 #include <sys/types.h>
 #include <sys/stat.h>
@@ -47,6 +47,11 @@ static void fail(const char *fmt, unsigned long a, unsigned long b)
 int main(void)
 {
 	const unsigned int PAGE_SIZE = sysconf(_SC_PAGESIZE);
+#ifdef __arm__
+	unsigned long va = 2 * PAGE_SIZE;
+#else
+	unsigned long va = 0;
+#endif
 	void *p;
 	int fd;
 	unsigned long a, b;
@@ -55,7 +60,7 @@ int main(void)
 	if (fd == -1)
 		return 1;
 
-	p = mmap(NULL, PAGE_SIZE, PROT_NONE, MAP_PRIVATE|MAP_FILE|MAP_FIXED, fd, 0);
+	p = mmap((void *)va, PAGE_SIZE, PROT_NONE, MAP_PRIVATE|MAP_FILE|MAP_FIXED, fd, 0);
 	if (p == MAP_FAILED) {
 		if (errno == EPERM)
 			return 2;