Merge "drm/msm/sde: Add support for gamut programming with AHB" into msm-4.8
diff --git a/Documentation/devicetree/bindings/dma/sps/sps.txt b/Documentation/devicetree/bindings/dma/sps/sps.txt
new file mode 100644
index 0000000..92dda7f
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/sps/sps.txt
@@ -0,0 +1,47 @@
+SPS (Smart Peripheral Switch) may be used as a DMA engine to move data
+in either the Peripheral-to-Peripheral (a.k.a. BAM-to-BAM) mode or the
+Peripheral-to-Memory (a.k.a. BAM-System) mode. SPS includes the BAM (Bus
+Access Module) hardware block, the BAM DMA peripheral, and pipe memory.
+
+Required property:
+  - compatible: should be "qcom,msm_sps" or "qcom,msm_sps_4k"
+
+Optional properties:
+  - reg: offset and size for the memory mapping, including maps for the
+    BAM of the BAM DMA device, the BAM DMA peripheral, pipe memory and
+    reserved memory.
+  - reg-names: indicates various resources passed to the driver (via the
+    reg property) by name. Examples are "bam_mem", "core_mem",
+    "pipe_mem" and "res_mem".
+  - interrupts: IRQ line
+  - qcom,device-type: specifies the device configuration of BAM DMA and
+    pipe memory. Can be one of:
+        1 - With BAM DMA and without pipe memory
+        2 - With BAM DMA and with pipe memory
+        3 - Without BAM DMA and without pipe memory
+  - qcom,pipe-attr-ee: indicates that BAM pipes are attributed to a
+    specific EE, which makes it possible to identify the pipes that
+    belong to the apps side and to enable error interrupts at the pipe
+    level.
+  - clocks: This property shall provide a list of entries, each of which
+    contains a phandle to the clock controller device and a macro that is
+    the clock's name in hardware. These should be "clock_rpm" as the clock
+    controller phandle and "clk_pnoc_sps_clk" as the macro for "dfab_clk",
+    and "clock_gcc" as the clock controller phandle and
+    "clk_gcc_bam_dma_ahb_clk" as the macro for "dma_bam_pclk".
+  - clock-names: This property shall contain the clock input names used
+    by the driver, in the same order as the clocks property. These should
+    be "dfab_clk" and "dma_bam_pclk".
+
+Example:
+
+	qcom,sps@f9984000 {
+		compatible = "qcom,msm_sps";
+		reg = <0xf9984000 0x15000>,
+		      <0xf9999000 0xb000>,
+		      <0xfe803000 0x4800>;
+		interrupts = <0 94 0>;
+		qcom,device-type = <2>;
+		qcom,pipe-attr-ee;
+		clocks = <&clock_rpm clk_pnoc_sps_clk>,
+			 <&clock_gcc clk_gcc_bam_dma_ahb_clk>;
+		clock-names = "dfab_clk", "dma_bam_pclk";
+	};
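
For driver writers, a minimal sketch of how these properties might be
consumed, assuming the standard OF and clock APIs (the function name and
the fallback value below are illustrative, not taken from the sps driver):

	static int sps_parse_dt(struct platform_device *pdev)
	{
		struct device *dev = &pdev->dev;
		struct clk *dfab_clk;
		u32 dev_type;

		/* Optional; fall back to "no BAM DMA, no pipe memory". */
		if (of_property_read_u32(dev->of_node, "qcom,device-type",
					 &dev_type))
			dev_type = 3;

		if (of_property_read_bool(dev->of_node, "qcom,pipe-attr-ee"))
			dev_info(dev, "pipes attributed to a specific EE\n");

		/* The clock name matches the clock-names property above. */
		dfab_clk = devm_clk_get(dev, "dfab_clk");
		if (IS_ERR(dfab_clk))
			return PTR_ERR(dfab_clk);

		return 0;
	}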
diff --git a/Documentation/devicetree/bindings/pci/msm_pcie.txt b/Documentation/devicetree/bindings/pci/msm_pcie.txt
new file mode 100644
index 0000000..a50e0c2
--- /dev/null
+++ b/Documentation/devicetree/bindings/pci/msm_pcie.txt
@@ -0,0 +1,288 @@
+MSM PCIe
+
+MSM PCI Express root complex
+
+Required properties:
+  - compatible: should be "qcom,pci-msm"
+  - cell-index: defines root complex ID.
+  - #address-cells: Should provide a value of 0.
+  - reg: should contain PCIe register maps.
+  - reg-names: indicates various resources passed to driver by name.
+		Should be "parf", "phy", "dm_core", "elbi", "conf", "io", "bars".
+		These correspond to different modules within the PCIe core.
+  - ranges: For details of the ranges property, please refer to:
+    "Documentation/devicetree/bindings/pci/pci.txt"
+  - interrupts: Should be in the format <0 1 2>; each entry is an index
+		into the interrupt-map that contains the PCIe-related interrupts.
+  - #interrupt-cells: Should provide a value of 1.
+  - interrupt-map-mask: should provide a value of 0xffffffff.
+  - interrupt-map:  Must create a mapping for each of the interrupts
+		    defined in the interrupts property above.
+		    For a PCIe device node, it should define mappings for
+		    the corresponding PCIe interrupts supporting the specification.
+  - interrupt-names: indicates interrupts passed to driver by name.
+		     Should be "int_msi", "int_a", "int_b", "int_c", "int_d",
+		     "int_pls_pme", "int_pme_legacy", "int_pls_err",
+		     "int_aer_legacy", "int_pls_link_up",
+		     "int_pls_link_down", "int_bridge_flush_n",
+		     "msi_0", "msi_1", "msi_2", "msi_3",
+		     "msi_4", "msi_5", "msi_6", "msi_7",
+		     "msi_8", "msi_9", "msi_10", "msi_11",
+		     "msi_12", "msi_13", "msi_14", "msi_15",
+		     "msi_16", "msi_17", "msi_18", "msi_19",
+		     "msi_20", "msi_21", "msi_22", "msi_23",
+		     "msi_24", "msi_25", "msi_26", "msi_27",
+		     "msi_28", "msi_29", "msi_30", "msi_31"
+		     These correspond to the standard PCIe specification to support
+		     MSIs, virtual IRQs (INTx), and link state notifications.
+  - perst-gpio: PERST GPIO, as specified by the PCIe spec.
+  - wake-gpio: WAKE GPIO, as specified by the PCIe spec.
+  - <supply-name>-supply: phandle to the regulator device tree node.
+    Refer to the schematics for the corresponding voltage regulators.
+    vreg-1.8-supply: phandle to the analog supply for the PCIe controller.
+    vreg-3.3-supply: phandle to the analog supply for the PCIe controller.
+    vreg-0.9-supply: phandle to the analog supply for the PCIe controller.
+
+Optional Properties:
+  - qcom,<supply-name>-voltage-level: specifies voltage levels for the supply.
+    Should be specified in triplets (max, min, optimal), units uV.
+  - clkreq-gpio: CLKREQ GPIO specified by PCIe spec.
+  - qcom,ep-gpio: GPIO which enables a certain type of endpoint for link training.
+  - pinctrl-names: The state name of the pin configuration.
+    supports: "default", "sleep"
+  - pinctrl-0: For details of pinctrl properties, please refer to:
+    "Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt"
+  - pinctrl-1: For details of pinctrl properties, please refer to:
+    "Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt"
+  - clocks: list of clock phandles
+  - clock-names: list of names of clock inputs.
+		     Should be "pcie_0_pipe_clk", "pcie_0_ref_clk_src",
+				"pcie_0_aux_clk", "pcie_0_cfg_ahb_clk",
+				"pcie_0_mstr_axi_clk", "pcie_0_slv_axi_clk",
+				"pcie_0_ldo";
+  - max-clock-frequency-hz: list of the maximum operating frequencies,
+				stored in the same order as the clock names;
+  - qcom,l0s-supported: L0s is supported.
+  - qcom,l1-supported: L1 is supported.
+  - qcom,l1ss-supported: L1 sub-states (L1ss) are supported.
+  - qcom,aux-clk-sync: The AUX clock is synchronous to the Core clock to
+    support L1ss.
+  - qcom,common-clk-en: Enables the common clock configuration for the endpoint.
+  - qcom,clk-power-manage-en: Enables the clock power management for the
+    endpoint.
+  - qcom,n-fts: The number of fast training sequences sent when the link state
+    is changed from L0s to L0.
+  - qcom,pcie-phy-ver: version of PCIe PHY.
+  - qcom,phy-sequence: The initialization sequence to bring up the PCIe PHY.
+    Should be specified in groups (offset, value, delay).
+  - qcom,port-phy-sequence: The initialization sequence to bring up the
+    PCIe port PHY.
+    Should be specified in groups (offset, value, delay).
+  - qcom,use-19p2mhz-aux-clk: The frequency of the PCIe AUX clock is 19.2 MHz.
+  - qcom,ep-wakeirq: The endpoint will issue a wake signal when it comes up,
+    and the root complex has the capability to enumerate the endpoint in this
+    case.
+  - qcom,msi-gicm-addr: MSI address for GICv2m.
+  - qcom,msi-gicm-base: MSI IRQ base for GICv2m.
+  - qcom,ext-ref-clk: The reference clock is external.
+  - iommus: the phandle and stream IDs for the SMMU used by this root
+    complex. This should be used in separate nodes from the main root
+    complex nodes, and is the only property needed in that case.
+  - qcom,common-phy: There is a common PHY for all the root complexes.
+  - qcom,smmu-exist: PCIe uses an SMMU.
+  - qcom,smmu-sid-base: The base SMMU SID that the PCIe bus driver will use
+    to calculate and assign a SID for each endpoint.
+  - qcom,ep-latency: The time (unit: ms) to wait for the PCIe endpoint to
+    become stable after power on, before de-asserting PERST to the endpoint.
+  - qcom,wr-halt-size: With base 2, this exponent determines the size of the
+    data on which the PCIe core will halt for each write transaction (see the
+    worked example after the node example below).
+  - qcom,cpl-timeout: Completion timeout value. This value selects the time
+    range after which the root complex will send out a completion packet if
+    there is no response from the endpoint.
+  - linux,pci-domain: For details of pci-domains properties, please refer to:
+    "Documentation/devicetree/bindings/pci/pci.txt"
+  - qcom,perst-delay-us-min: The minimum allowed time (unit: us) to sleep after
+    asserting or de-asserting PERST GPIO.
+  - qcom,perst-delay-us-max: The maximum allowed time (unit: us) to sleep after
+    asserting or de-asserting PERST GPIO.
+  - qcom,tlp-rd-size: The max TLP read size (calculation: 128 times 2 to the
+    tlp-rd-size power; see the worked example after the node example below).
+  - Refer to "Documentation/devicetree/bindings/arm/msm/msm_bus.txt" for
+    below optional properties:
+	- qcom,msm-bus,name
+	- qcom,msm-bus,num-cases
+	- qcom,msm-bus,num-paths
+	- qcom,msm-bus,vectors-KBps
+  - qcom,scm-dev-id: If present, this device ID value is passed to the secure
+	channel manager (SCM) driver. The SCM driver uses this device ID to
+	restore the PCIe controller's security configuration after the
+	controller comes out of power collapse.
+  - resets: reset specifier pairs, each consisting of a phandle to the reset
+    controller and the reset line used by this controller.
+  - reset-names: reset signal name strings, sorted in the same order as the
+    resets property.
+
+Example:
+
+	pcie0: qcom,pcie@fc520000 {
+		compatible = "qcom,msm_pcie";
+		cell-index = <0>;
+		#address-cells = <0>;
+		reg = <0xfc520000 0x2000>,
+		      <0xfc526000 0x1000>,
+		      <0xff000000 0x1000>,
+		      <0xff001000 0x1000>,
+		      <0xff100000 0x1000>,
+		      <0xff200000 0x100000>,
+		      <0xff300000 0xd00000>;
+		reg-names = "parf", "dm_core", "elbi",
+				"conf", "io", "bars";
+		ranges = <0x01000000 0x0 0x0c200000 0x0c200000 0x0 0x100000>,
+			<0x02000000 0x0 0x0c300000 0x0c300000 0x0 0xd00000>;
+		interrupt-parent = <&pcie0>;
+		interrupts = <0 1 2 3 4 5 6 7 8 9 10 11
+				12 13 14 15 16 17 18 19 20
+				21 22 23 24 25 26 27 28 29
+				30 31 32 33 34 35 36 37 38
+				39 40 41 42 43>;
+		#interrupt-cells = <1>;
+		interrupt-map-mask = <0xffffffff>;
+		interrupt-map = <0x0 0x0 0x0 0 &intc 0 405 0
+				0x0 0x0 0x0 1 &intc 0 244 0
+				0x0 0x0 0x0 2 &intc 0 245 0
+				0x0 0x0 0x0 3 &intc 0 247 0
+				0x0 0x0 0x0 4 &intc 0 248 0
+				0x0 0x0 0x0 5 &intc 0 249 0
+				0x0 0x0 0x0 6 &intc 0 250 0
+				0x0 0x0 0x0 7 &intc 0 251 0
+				0x0 0x0 0x0 8 &intc 0 252 0
+				0x0 0x0 0x0 9 &intc 0 253 0
+				0x0 0x0 0x0 10 &intc 0 254 0
+				0x0 0x0 0x0 11 &intc 0 255 0
+				0x0 0x0 0x0 12 &intc 0 448 0
+				0x0 0x0 0x0 13 &intc 0 449 0
+				0x0 0x0 0x0 14 &intc 0 450 0
+				0x0 0x0 0x0 15 &intc 0 451 0
+				0x0 0x0 0x0 16 &intc 0 452 0
+				0x0 0x0 0x0 17 &intc 0 453 0
+				0x0 0x0 0x0 18 &intc 0 454 0
+				0x0 0x0 0x0 19 &intc 0 455 0
+				0x0 0x0 0x0 20 &intc 0 456 0
+				0x0 0x0 0x0 21 &intc 0 457 0
+				0x0 0x0 0x0 22 &intc 0 458 0
+				0x0 0x0 0x0 23 &intc 0 459 0
+				0x0 0x0 0x0 24 &intc 0 460 0
+				0x0 0x0 0x0 25 &intc 0 461 0
+				0x0 0x0 0x0 26 &intc 0 462 0
+				0x0 0x0 0x0 27 &intc 0 463 0
+				0x0 0x0 0x0 28 &intc 0 464 0
+				0x0 0x0 0x0 29 &intc 0 465 0
+				0x0 0x0 0x0 30 &intc 0 466 0
+				0x0 0x0 0x0 31 &intc 0 467 0
+				0x0 0x0 0x0 32 &intc 0 468 0
+				0x0 0x0 0x0 33 &intc 0 469 0
+				0x0 0x0 0x0 34 &intc 0 470 0
+				0x0 0x0 0x0 35 &intc 0 471 0
+				0x0 0x0 0x0 36 &intc 0 472 0
+				0x0 0x0 0x0 37 &intc 0 473 0
+				0x0 0x0 0x0 38 &intc 0 474 0
+				0x0 0x0 0x0 39 &intc 0 475 0
+				0x0 0x0 0x0 40 &intc 0 476 0
+				0x0 0x0 0x0 41 &intc 0 477 0
+				0x0 0x0 0x0 42 &intc 0 478 0
+				0x0 0x0 0x0 43 &intc 0 479 0>;
+		interrupt-names = "int_msi", "int_a", "int_b", "int_c", "int_d",
+				"int_pls_pme", "int_pme_legacy", "int_pls_err",
+				"int_aer_legacy", "int_pls_link_up",
+				"int_pls_link_down", "int_bridge_flush_n",
+				"msi_0", "msi_1", "msi_2", "msi_3",
+				"msi_4", "msi_5", "msi_6", "msi_7",
+				"msi_8", "msi_9", "msi_10", "msi_11",
+				"msi_12", "msi_13", "msi_14", "msi_15",
+				"msi_16", "msi_17", "msi_18", "msi_19",
+				"msi_20", "msi_21", "msi_22", "msi_23",
+				"msi_24", "msi_25", "msi_26", "msi_27",
+				"msi_28", "msi_29", "msi_30", "msi_31";
+
+		qcom,phy-sequence = <0x804 0x01 0x00
+					0x034 0x14 0x00
+					0x138 0x30 0x00
+					0x048 0x0f 0x00
+					0x15c 0x06 0x00
+					0x090 0x01 0x00
+					0x808 0x03 0x00>;
+		qcom,port-phy-sequence = <0x804 0x01 0x00
+					0x034 0x14 0x00
+					0x138 0x30 0x00
+					0x048 0x0f 0x00
+					0x15c 0x06 0x00
+					0x090 0x01 0x00
+					0x808 0x03 0x00>;
+		perst-gpio = <&msmgpio 70 0>;
+		wake-gpio = <&msmgpio 69 0>;
+		clkreq-gpio = <&msmgpio 68 0>;
+		qcom,ep-gpio = <&tlmm 94 0>;
+
+		gdsc-vdd-supply = <&gdsc_pcie_0>;
+		vreg-1.8-supply = <&pma8084_l12>;
+		vreg-0.9-supply = <&pma8084_l4>;
+		vreg-3.3-supply = <&wlan_vreg>;
+
+		qcom,vreg-1.8-voltage-level = <1800000 1800000 1000>;
+		qcom,vreg-0.9-voltage-level = <950000 950000 24000>;
+
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&pcie0_clkreq_default &pcie0_perst_default &pcie0_wake_default>;
+		pinctrl-1 = <&pcie0_clkreq_sleep &pcie0_perst_sleep &pcie0_wake_sleep>;
+
+		clocks = <&clock_gcc clk_gcc_pcie_0_pipe_clk>,
+			<&clock_rpm clk_ln_bb_clk>,
+			<&clock_gcc clk_gcc_pcie_0_aux_clk>,
+			<&clock_gcc clk_gcc_pcie_0_cfg_ahb_clk>,
+			<&clock_gcc clk_gcc_pcie_0_mstr_axi_clk>,
+			<&clock_gcc clk_gcc_pcie_0_slv_axi_clk>,
+			<&clock_gcc clk_pcie_0_phy_ldo>;
+
+		clock-names = "pcie_0_pipe_clk", "pcie_0_ref_clk_src",
+				"pcie_0_aux_clk", "pcie_0_cfg_ahb_clk",
+				"pcie_0_mstr_axi_clk", "pcie_0_slv_axi_clk",
+				"pcie_0_ldo";
+
+		resets = <&clock_gcc GCC_PCIE_PHY_BCR>,
+			<&clock_gcc GCC_PCIE_PHY_COM_BCR>,
+			<&clock_gcc GCC_PCIE_PHY_NOCSR_COM_PHY_BCR>,
+			<&clock_gcc GCC_PCIE_0_PHY_BCR>;
+
+		reset-names = "pcie_phy_reset", "pcie_phy_com_reset",
+				"pcie_phy_nocsr_com_phy_reset","pcie_0_phy_reset";
+
+		max-clock-frequency-hz = <125000000>, <0>, <1000000>,
+						<0>, <0>, <0>, <0>;
+		qcom,l0s-supported;
+		qcom,l1-supported;
+		qcom,l1ss-supported;
+		qcom,aux-clk-sync;
+		qcom,n-fts = <0x50>;
+		qcom,pcie-phy-ver = <1>;
+		qcom,ep-wakeirq;
+		qcom,msi-gicm-addr = <0xf9040040>;
+		qcom,msi-gicm-base = <0x160>;
+		qcom,ext-ref-clk;
+		qcom,tlp-rd-size = <0x5>;
+		qcom,common-phy;
+		qcom,smmu-exist;
+		qcom,smmu-sid-base = <0x1480>;
+		qcom,ep-latency = <100>;
+		qcom,wr-halt-size = <0xa>; /* 1KB */
+		qcom,cpl-timeout = <0x2>;
+
+		iommus = <&anoc0_smmu>;
+
+		qcom,msm-bus,name = "pcie0";
+		qcom,msm-bus,num-cases = <2>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps =
+				<45 512 0 0>,
+				<45 512 500 800>;
+
+		qcom,scm-dev-id = <11>;
+	};
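
To make the qcom,wr-halt-size and qcom,tlp-rd-size encodings concrete,
a worked example using the values from the node above (illustrative C,
not driver code):

	u32 wr_halt_size = 0xa;			/* qcom,wr-halt-size */
	u32 tlp_rd_size = 0x5;			/* qcom,tlp-rd-size */
	u32 wr_halt_bytes = 1 << wr_halt_size;	/* 2^10 = 1024 bytes (1KB) */
	u32 tlp_rd_bytes = 128 << tlp_rd_size;	/* 128 * 2^5 = 4096 bytes */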
diff --git a/arch/arm64/boot/dts/qcom/msmskunk.dtsi b/arch/arm64/boot/dts/qcom/msmskunk.dtsi
index c0862f5..822ebf1 100644
--- a/arch/arm64/boot/dts/qcom/msmskunk.dtsi
+++ b/arch/arm64/boot/dts/qcom/msmskunk.dtsi
@@ -1327,6 +1327,11 @@
 		};
 	};
 
+	qcom,sps {
+		compatible = "qcom,msm_sps_4k";
+		qcom,pipe-attr-ee;
+	};
+
 	qcom,msm_gsi {
 		compatible = "qcom,msm_gsi";
 	};
diff --git a/arch/arm64/configs/msmskunk-perf_defconfig b/arch/arm64/configs/msmskunk-perf_defconfig
index 97fbad8..cf9adb7 100644
--- a/arch/arm64/configs/msmskunk-perf_defconfig
+++ b/arch/arm64/configs/msmskunk-perf_defconfig
@@ -326,6 +326,8 @@
 CONFIG_ANDROID_LOW_MEMORY_KILLER=y
 CONFIG_ION=y
 CONFIG_ION_MSM=y
+CONFIG_SPS=y
+CONFIG_SPS_SUPPORT_NDP_BAM=y
 CONFIG_MSM_GCC_SKUNK=y
 CONFIG_MSM_VIDEOCC_SKUNK=y
 CONFIG_REMOTE_SPINLOCK_MSM=y
@@ -387,6 +389,7 @@
 CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y
 CONFIG_CORESIGHT_QCOM_REPLICATOR=y
 CONFIG_CORESIGHT_STM=y
+CONFIG_CORESIGHT_CTI=y
 CONFIG_SECURITY=y
 CONFIG_SECURITY_SELINUX=y
 CONFIG_SECURITY_SMACK=y
diff --git a/arch/arm64/configs/msmskunk_defconfig b/arch/arm64/configs/msmskunk_defconfig
index 4d2e028..c2a3c6b 100644
--- a/arch/arm64/configs/msmskunk_defconfig
+++ b/arch/arm64/configs/msmskunk_defconfig
@@ -337,6 +337,8 @@
 CONFIG_ANDROID_LOW_MEMORY_KILLER=y
 CONFIG_ION=y
 CONFIG_ION_MSM=y
+CONFIG_SPS=y
+CONFIG_SPS_SUPPORT_NDP_BAM=y
 CONFIG_MSM_GCC_SKUNK=y
 CONFIG_MSM_VIDEOCC_SKUNK=y
 CONFIG_REMOTE_SPINLOCK_MSM=y
@@ -435,6 +437,7 @@
 CONFIG_CORESIGHT_SOURCE_ETM4X=y
 CONFIG_CORESIGHT_QCOM_REPLICATOR=y
 CONFIG_CORESIGHT_STM=y
+CONFIG_CORESIGHT_CTI=y
 CONFIG_SECURITY=y
 CONFIG_SECURITY_SELINUX=y
 CONFIG_SECURITY_SMACK=y
diff --git a/arch/arm64/include/asm/pci.h b/arch/arm64/include/asm/pci.h
index b9a7ba9..ddd57a7 100644
--- a/arch/arm64/include/asm/pci.h
+++ b/arch/arm64/include/asm/pci.h
@@ -39,3 +39,8 @@
 
 #endif  /* __KERNEL__ */
 #endif  /* __ASM_PCI_H */
+
+#ifdef CONFIG_PCI_MSM
+#define arch_setup_msi_irqs arch_setup_msi_irqs
+#define arch_teardown_msi_irqs arch_teardown_msi_irqs
+#endif
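
The define-the-symbol-to-its-own-name idiom above lets generic code test
for an architecture override with #ifdef. With CONFIG_PCI_MSM set, the
MSM PCIe driver is then expected to provide the standard hook signatures
(a sketch of the declarations only):

	int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type);
	void arch_teardown_msi_irqs(struct pci_dev *dev);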
diff --git a/drivers/clk/qcom/videocc-msmskunk.c b/drivers/clk/qcom/videocc-msmskunk.c
index adfece6..670efb52 100644
--- a/drivers/clk/qcom/videocc-msmskunk.c
+++ b/drivers/clk/qcom/videocc-msmskunk.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -68,8 +68,8 @@
 };
 
 static const struct pll_config video_pll0_config = {
-	.l = 0x15,
-	.frac = 0xaab,
+	.l = 0x10,
+	.frac = 0xaaab,
 };
 
 static struct clk_alpha_pll video_pll0 = {
@@ -85,8 +85,8 @@
 			.ops = &clk_fabia_pll_ops,
 			VDD_CX_FMAX_MAP5(
 				MIN, 200000000,
-				LOW, 660000000,
-				LOW_L1, 1212000000,
+				LOW, 640000000,
+				LOW_L1, 760000000,
 				NOMINAL, 1332000000,
 				HIGH, 1599000000),
 
@@ -95,10 +95,10 @@
 };
 
 static const struct freq_tbl ftbl_video_cc_venus_clk_src[] = {
-	F(101000000, P_VIDEO_PLL0_OUT_MAIN, 4, 0, 0),
-	F(202000000, P_VIDEO_PLL0_OUT_MAIN, 2, 0, 0),
-	F(269333333, P_VIDEO_PLL0_OUT_MAIN, 1.5, 0, 0),
-	F(404000000, P_VIDEO_PLL0_OUT_MAIN, 1, 0, 0),
+	F(100000000, P_VIDEO_PLL0_OUT_MAIN, 4, 0, 0),
+	F(200000000, P_VIDEO_PLL0_OUT_MAIN, 2, 0, 0),
+	F(320000000, P_VIDEO_PLL0_OUT_MAIN, 1, 0, 0),
+	F(380000000, P_VIDEO_PLL0_OUT_MAIN, 1, 0, 0),
 	F(444000000, P_VIDEO_PLL0_OUT_MAIN, 1, 0, 0),
 	F(533000000, P_VIDEO_PLL0_OUT_MAIN, 1, 0, 0),
 	{ }
@@ -120,8 +120,8 @@
 		VDD_CX_FMAX_MAP6(
 			MIN, 100000000,
 			LOWER, 200000000,
-			LOW, 330000000,
-			LOW_L1, 404000000,
+			LOW, 320000000,
+			LOW_L1, 380000000,
 			NOMINAL, 444000000,
 			HIGH, 533000000),
 	},
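
For reference, the new video_pll0 configuration lands at roughly 320 MHz,
assuming the usual 19.2 MHz XO reference and a 16-bit fractional field on
the Fabia PLL (an inference from the values, not stated in the patch):

	rate = 19.2 MHz * (0x10 + 0xaaab / 2^16)
	     = 19.2 MHz * (16 + 0.6667)
	     ~ 320 MHz

which matches the new 320 MHz entry (divider 1) in the Venus frequency
table; the higher-rate entries reprogram the PLL rather than divide it.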
diff --git a/drivers/gpu/msm/adreno-gpulist.h b/drivers/gpu/msm/adreno-gpulist.h
index e616338..6a00919 100644
--- a/drivers/gpu/msm/adreno-gpulist.h
+++ b/drivers/gpu/msm/adreno-gpulist.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2002,2007-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2002,2007-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -289,7 +289,8 @@
 		.major = 1,
 		.minor = 2,
 		.patchid = ANY_ID,
-		.features = ADRENO_64BIT,
+		.features = ADRENO_64BIT | ADRENO_CONTENT_PROTECTION |
+			ADRENO_CPZ_RETENTION,
 		.pm4fw_name = "a530_pm4.fw",
 		.pfpfw_name = "a530_pfp.fw",
 		.gpudev = &adreno_a5xx_gpudev,
diff --git a/drivers/gpu/msm/adreno_a5xx_snapshot.c b/drivers/gpu/msm/adreno_a5xx_snapshot.c
index 9fd7cb4..c1e0a70 100644
--- a/drivers/gpu/msm/adreno_a5xx_snapshot.c
+++ b/drivers/gpu/msm/adreno_a5xx_snapshot.c
@@ -943,11 +943,13 @@
 	a5xx_snapshot_debugbus(device, snapshot);
 
 	/* Preemption record */
-	FOR_EACH_RINGBUFFER(adreno_dev, rb, i) {
-		kgsl_snapshot_add_section(device,
-			KGSL_SNAPSHOT_SECTION_GPU_OBJECT_V2,
-			snapshot, snapshot_preemption_record,
-			&rb->preemption_desc);
+	if (adreno_is_preemption_enabled(adreno_dev)) {
+		FOR_EACH_RINGBUFFER(adreno_dev, rb, i) {
+			kgsl_snapshot_add_section(device,
+				KGSL_SNAPSHOT_SECTION_GPU_OBJECT_V2,
+				snapshot, snapshot_preemption_record,
+				&rb->preemption_desc);
+		}
 	}
 
 }
diff --git a/drivers/gpu/msm/kgsl_pool.c b/drivers/gpu/msm/kgsl_pool.c
index 6ecbab4..bb92b8b 100644
--- a/drivers/gpu/msm/kgsl_pool.c
+++ b/drivers/gpu/msm/kgsl_pool.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -299,13 +299,49 @@
 	struct page *p = NULL;
 	int order = get_order(*page_size);
 	int pool_idx;
+	size_t size = 0;
 
 	if ((pages == NULL) || pages_len < (*page_size >> PAGE_SHIFT))
 		return -EINVAL;
 
+	/* If the pool is not configured get pages from the system */
+	if (!kgsl_num_pools) {
+		gfp_t gfp_mask = kgsl_gfp_mask(order);
+
+		page = alloc_pages(gfp_mask, order);
+		if (page == NULL) {
+			/* Retry with lower order pages */
+			if (order > 0) {
+				size = PAGE_SIZE << --order;
+				goto eagain;
+
+			} else
+				return -ENOMEM;
+		}
+		_kgsl_pool_zero_page(page, order);
+		goto done;
+	}
+
 	pool = _kgsl_get_pool_from_order(order);
-	if (pool == NULL)
-		return -EINVAL;
+	if (pool == NULL) {
+		/* Retry with lower order pages */
+		if (order > 0) {
+			size = PAGE_SIZE << --order;
+			goto eagain;
+		} else {
+			/*
+			 * Fall back to direct allocation in case
+			 * pool with zero order is not present
+			 */
+			gfp_t gfp_mask = kgsl_gfp_mask(order);
+
+			page = alloc_pages(gfp_mask, order);
+			if (page == NULL)
+				return -ENOMEM;
+			_kgsl_pool_zero_page(page, order);
+			goto done;
+		}
+	}
 
 	pool_idx = kgsl_pool_idx_lookup(order);
 	page = _kgsl_pool_get_page(pool);
@@ -316,10 +352,9 @@
 
 		/* Only allocate non-reserved memory for certain pools */
 		if (!pool->allocation_allowed && pool_idx > 0) {
-			*page_size = PAGE_SIZE <<
+			size = PAGE_SIZE <<
 					kgsl_pools[pool_idx-1].pool_order;
-			*align = ilog2(*page_size);
-			return -EAGAIN;
+			goto eagain;
 		}
 
 		page = alloc_pages(gfp_mask, order);
@@ -327,10 +362,9 @@
 		if (!page) {
 			if (pool_idx > 0) {
 				/* Retry with lower order pages */
-				*page_size = PAGE_SIZE <<
+				size = PAGE_SIZE <<
 					kgsl_pools[pool_idx-1].pool_order;
-				*align = ilog2(*page_size);
-				return -EAGAIN;
+				goto eagain;
 			} else
 				return -ENOMEM;
 		}
@@ -338,6 +372,7 @@
 		_kgsl_pool_zero_page(page, order);
 	}
 
+done:
 	for (j = 0; j < (*page_size >> PAGE_SHIFT); j++) {
 		p = nth_page(page, j);
 		pages[pcount] = p;
@@ -345,6 +380,12 @@
 	}
 
 	return pcount;
+
+eagain:
+	*page_size = kgsl_get_page_size(size,
+			ilog2(size));
+	*align = ilog2(*page_size);
+	return -EAGAIN;
 }
 
 void kgsl_pool_free_page(struct page *page)
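
The -EAGAIN contract above (shrink *page_size and *align, then return)
implies a retry loop in the caller, roughly of this shape (a sketch with
hypothetical variable names, mirroring kgsl_sharedmem_page_alloc_user):

	while (remaining > 0) {
		int ret = kgsl_pool_alloc_page(&page_size, pages + pcount,
					       pages_len - pcount, &align);
		if (ret == -EAGAIN)
			continue;	/* page_size/align were lowered; retry */
		if (ret < 0)
			return ret;	/* -ENOMEM/-EINVAL: genuine failure */
		pcount += ret;		/* ret is the number of pages filled */
		remaining -= page_size;
	}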
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c
index c43c210..ea13419 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.c
+++ b/drivers/gpu/msm/kgsl_pwrctrl.c
@@ -2012,6 +2012,42 @@
 	return clk_set_rate(pwr->grp_clks[pwr->isense_clk_indx], rate);
 }
 
+static inline void _close_pcl(struct kgsl_pwrctrl *pwr)
+{
+	if (pwr->pcl)
+		msm_bus_scale_unregister_client(pwr->pcl);
+
+	pwr->pcl = 0;
+}
+
+static inline void _close_ocmem_pcl(struct kgsl_pwrctrl *pwr)
+{
+	if (pwr->ocmem_pcl)
+		msm_bus_scale_unregister_client(pwr->ocmem_pcl);
+
+	pwr->ocmem_pcl = 0;
+}
+
+static inline void _close_regulators(struct kgsl_pwrctrl *pwr)
+{
+	int i;
+
+	for (i = 0; i < KGSL_MAX_REGULATORS; i++)
+		pwr->regulators[i].reg = NULL;
+}
+
+static inline void _close_clks(struct kgsl_device *device)
+{
+	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+	int i;
+
+	for (i = 0; i < KGSL_MAX_CLKS; i++)
+		pwr->grp_clks[i] = NULL;
+
+	if (pwr->gpu_bimc_int_clk)
+		devm_clk_put(&device->pdev->dev, pwr->gpu_bimc_int_clk);
+}
+
 int kgsl_pwrctrl_init(struct kgsl_device *device)
 {
 	int i, k, m, n = 0, result;
@@ -2029,7 +2065,7 @@
 
 	result = _get_clocks(device);
 	if (result)
-		return result;
+		goto error_cleanup_clks;
 
 	/* Make sure we have a source clk for freq setting */
 	if (pwr->grp_clks[0] == NULL)
@@ -2047,7 +2083,8 @@
 
 	if (pwr->num_pwrlevels == 0) {
 		KGSL_PWR_ERR(device, "No power levels are defined\n");
-		return -EINVAL;
+		result = -EINVAL;
+		goto error_cleanup_clks;
 	}
 
 	/* Initialize the user and thermal clock constraints */
@@ -2077,7 +2114,7 @@
 
 	result = get_regulators(device);
 	if (result)
-		return result;
+		goto error_cleanup_regulators;
 
 	pwr->power_flags = 0;
 
@@ -2097,8 +2134,10 @@
 			pwr->ocmem_pcl = msm_bus_scale_register_client
 					(ocmem_scale_table);
 
-		if (!pwr->ocmem_pcl)
-			return -EINVAL;
+		if (!pwr->ocmem_pcl) {
+			result = -EINVAL;
+			goto error_disable_pm;
+		}
 	}
 
 	/* Bus width in bytes, set it to zero if not found */
@@ -2128,14 +2167,18 @@
 		 * from the driver.
 		 */
 		pwr->pcl = msm_bus_scale_register_client(bus_scale_table);
-		if (pwr->pcl == 0)
-			return -EINVAL;
+		if (pwr->pcl == 0) {
+			result = -EINVAL;
+			goto error_cleanup_ocmem_pcl;
+		}
 	}
 
 	pwr->bus_ib = kzalloc(bus_scale_table->num_usecases *
 		sizeof(*pwr->bus_ib), GFP_KERNEL);
-	if (pwr->bus_ib == NULL)
-		return -ENOMEM;
+	if (pwr->bus_ib == NULL) {
+		result = -ENOMEM;
+		goto error_cleanup_pcl;
+	}
 
 	/*
 	 * Pull the BW vote out of the bus table.  They will be used to
@@ -2194,36 +2237,26 @@
 		&pwr->tzone_name);
 
 	return result;
+
+error_cleanup_pcl:
+	_close_pcl(pwr);
+error_cleanup_ocmem_pcl:
+	_close_ocmem_pcl(pwr);
+error_disable_pm:
+	pm_runtime_disable(&pdev->dev);
+error_cleanup_regulators:
+	_close_regulators(pwr);
+error_cleanup_clks:
+	_close_clks(device);
+	return result;
 }
 
 void kgsl_pwrctrl_close(struct kgsl_device *device)
 {
 	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
-	int i;
 
 	KGSL_PWR_INFO(device, "close device %d\n", device->id);
 
-	pm_runtime_disable(&device->pdev->dev);
-
-	if (pwr->pcl)
-		msm_bus_scale_unregister_client(pwr->pcl);
-
-	pwr->pcl = 0;
-
-	if (pwr->ocmem_pcl)
-		msm_bus_scale_unregister_client(pwr->ocmem_pcl);
-
-	pwr->ocmem_pcl = 0;
-
-	for (i = 0; i < KGSL_MAX_REGULATORS; i++)
-		pwr->regulators[i].reg = NULL;
-
-	for (i = 0; i < KGSL_MAX_REGULATORS; i++)
-		pwr->grp_clks[i] = NULL;
-
-	if (pwr->gpu_bimc_int_clk)
-		devm_clk_put(&device->pdev->dev, pwr->gpu_bimc_int_clk);
-
 	pwr->power_flags = 0;
 
 	if (!IS_ERR_OR_NULL(pwr->sysfs_pwr_limit)) {
@@ -2232,6 +2265,16 @@
 		pwr->sysfs_pwr_limit = NULL;
 	}
 	kfree(pwr->bus_ib);
+
+	_close_pcl(pwr);
+
+	_close_ocmem_pcl(pwr);
+
+	pm_runtime_disable(&device->pdev->dev);
+
+	_close_regulators(pwr);
+
+	_close_clks(device);
 }
 
 /**
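
The conversion above follows the standard kernel goto-unwind idiom:
resources are released in the reverse order of acquisition on failure,
and kgsl_pwrctrl_close() now reuses the same _close_*() helpers. In
miniature (a sketch, not driver code):

	int init(void)
	{
		int ret;

		ret = acquire_a();
		if (ret)
			return ret;

		ret = acquire_b();
		if (ret)
			goto err_release_a;

		return 0;

	err_release_a:
		release_a();	/* undo in reverse order of acquisition */
		return ret;
	}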
diff --git a/drivers/gpu/msm/kgsl_sharedmem.c b/drivers/gpu/msm/kgsl_sharedmem.c
index 01aac1e..73e6c53 100644
--- a/drivers/gpu/msm/kgsl_sharedmem.c
+++ b/drivers/gpu/msm/kgsl_sharedmem.c
@@ -613,25 +613,6 @@
 }
 EXPORT_SYMBOL(kgsl_cache_range_op);
 
-#ifndef CONFIG_ALLOC_BUFFERS_IN_4K_CHUNKS
-static inline int get_page_size(size_t size, unsigned int align)
-{
-	if (align >= ilog2(SZ_1M) && size >= SZ_1M)
-		return SZ_1M;
-	else if (align >= ilog2(SZ_64K) && size >= SZ_64K)
-		return SZ_64K;
-	else if (align >= ilog2(SZ_8K) && size >= SZ_8K)
-		return SZ_8K;
-	else
-		return PAGE_SIZE;
-}
-#else
-static inline int get_page_size(size_t size, unsigned int align)
-{
-	return PAGE_SIZE;
-}
-#endif
-
 int
 kgsl_sharedmem_page_alloc_user(struct kgsl_memdesc *memdesc,
 			uint64_t size)
@@ -648,7 +629,7 @@
 
 	align = (memdesc->flags & KGSL_MEMALIGN_MASK) >> KGSL_MEMALIGN_SHIFT;
 
-	page_size = get_page_size(size, align);
+	page_size = kgsl_get_page_size(size, align);
 
 	/*
 	 * The alignment cannot be less than the intended page size - it can be
@@ -719,7 +700,7 @@
 		memdesc->page_count += page_count;
 
 		/* Get the needed page size for the next iteration */
-		page_size = get_page_size(len, align);
+		page_size = kgsl_get_page_size(len, align);
 	}
 
 	/* Call to the hypervisor to lock any secure buffer allocations */
diff --git a/drivers/gpu/msm/kgsl_sharedmem.h b/drivers/gpu/msm/kgsl_sharedmem.h
index 1ef31ef..10b37ae 100644
--- a/drivers/gpu/msm/kgsl_sharedmem.h
+++ b/drivers/gpu/msm/kgsl_sharedmem.h
@@ -365,4 +365,30 @@
 	}
 }
 
+/**
+ * kgsl_get_page_size() - Get the supported page size
+ * @size: Size of the allocation
+ * @align: Desired alignment (log2) of the allocation
+ *
+ * Return the supported page size
+ */
+#ifndef CONFIG_ALLOC_BUFFERS_IN_4K_CHUNKS
+static inline int kgsl_get_page_size(size_t size, unsigned int align)
+{
+	if (align >= ilog2(SZ_1M) && size >= SZ_1M)
+		return SZ_1M;
+	else if (align >= ilog2(SZ_64K) && size >= SZ_64K)
+		return SZ_64K;
+	else if (align >= ilog2(SZ_8K) && size >= SZ_8K)
+		return SZ_8K;
+	else
+		return PAGE_SIZE;
+}
+#else
+static inline int kgsl_get_page_size(size_t size, unsigned int align)
+{
+	return PAGE_SIZE;
+}
+#endif
+
 #endif /* __KGSL_SHAREDMEM_H */
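
Concretely, assuming 4K pages, the mapping from (size, align) to the
chosen page size works out as follows:

	kgsl_get_page_size(SZ_2M, ilog2(SZ_1M));	/* -> SZ_1M */
	kgsl_get_page_size(SZ_1M, ilog2(SZ_64K));	/* -> SZ_64K */
	kgsl_get_page_size(SZ_16K, ilog2(SZ_8K));	/* -> SZ_8K */
	kgsl_get_page_size(SZ_4K, ilog2(SZ_4K));	/* -> PAGE_SIZE */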
diff --git a/drivers/mfd/msm-cdc-supply.c b/drivers/mfd/msm-cdc-supply.c
index 36c58e0..9c7ebf78 100644
--- a/drivers/mfd/msm-cdc-supply.c
+++ b/drivers/mfd/msm-cdc-supply.c
@@ -386,19 +386,19 @@
 	}
 	static_sup_cnt = of_property_count_strings(dev->of_node,
 						   static_prop_name);
-	if (IS_ERR_VALUE(static_sup_cnt)) {
+	if (static_sup_cnt < 0) {
 		dev_err(dev, "%s: Failed to get static supplies(%d)\n",
 			__func__, static_sup_cnt);
 		rc = static_sup_cnt;
 		goto err_supply_cnt;
 	}
 	ond_sup_cnt = of_property_count_strings(dev->of_node, ond_prop_name);
-	if (IS_ERR_VALUE(ond_sup_cnt))
+	if (ond_sup_cnt < 0)
 		ond_sup_cnt = 0;
 
 	cp_sup_cnt = of_property_count_strings(dev->of_node,
 					       cp_prop_name);
-	if (IS_ERR_VALUE(cp_sup_cnt))
+	if (cp_sup_cnt < 0)
 		cp_sup_cnt = 0;
 
 	num_supplies = static_sup_cnt + ond_sup_cnt + cp_sup_cnt;
diff --git a/drivers/pci/host/Makefile b/drivers/pci/host/Makefile
index 084cb49..9afa97e 100644
--- a/drivers/pci/host/Makefile
+++ b/drivers/pci/host/Makefile
@@ -7,6 +7,7 @@
 obj-$(CONFIG_PCI_MVEBU) += pci-mvebu.o
 obj-$(CONFIG_PCI_AARDVARK) += pci-aardvark.o
 obj-$(CONFIG_PCI_TEGRA) += pci-tegra.o
+obj-$(CONFIG_PCI_MSM)	+= pci-msm.o
 obj-$(CONFIG_PCI_RCAR_GEN2) += pci-rcar-gen2.o
 obj-$(CONFIG_PCIE_RCAR) += pcie-rcar.o
 obj-$(CONFIG_PCI_HOST_COMMON) += pci-host-common.o
diff --git a/drivers/pci/host/pci-msm.c b/drivers/pci/host/pci-msm.c
new file mode 100644
index 0000000..fdbbe41
--- /dev/null
+++ b/drivers/pci/host/pci-msm.c
@@ -0,0 +1,7098 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * MSM PCIe controller driver.
+ */
+
+#include <linux/module.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/of_pci.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/regulator/rpm-smd-regulator.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/of_gpio.h>
+#include <linux/clk/msm-clk.h>
+#include <linux/reset.h>
+#include <linux/msm-bus.h>
+#include <linux/msm-bus-board.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+#include <linux/msi.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/pm_wakeup.h>
+#include <linux/compiler.h>
+#include <soc/qcom/scm.h>
+#include <linux/ipc_logging.h>
+#include <linux/msm_pcie.h>
+
+#ifdef CONFIG_ARCH_MDMCALIFORNIUM
+#define PCIE_VENDOR_ID_RCP		0x17cb
+#define PCIE_DEVICE_ID_RCP		0x0302
+
+#define PCIE20_L1SUB_CONTROL1		0x158
+#define PCIE20_PARF_DBI_BASE_ADDR	0x350
+#define PCIE20_PARF_SLV_ADDR_SPACE_SIZE	0x358
+
+#define TX_BASE 0x200
+#define RX_BASE 0x400
+#define PCS_BASE 0x800
+#define PCS_MISC_BASE 0x600
+
+#elif defined(CONFIG_ARCH_MSM8998)
+#define PCIE_VENDOR_ID_RCP		0x17cb
+#define PCIE_DEVICE_ID_RCP		0x0105
+
+#define PCIE20_L1SUB_CONTROL1		0x1E4
+#define PCIE20_PARF_DBI_BASE_ADDR       0x350
+#define PCIE20_PARF_SLV_ADDR_SPACE_SIZE 0x358
+
+#define TX_BASE 0
+#define RX_BASE 0
+#define PCS_BASE 0x800
+#define PCS_MISC_BASE 0
+
+#else
+#define PCIE_VENDOR_ID_RCP		0x17cb
+#define PCIE_DEVICE_ID_RCP		0x0104
+
+#define PCIE20_L1SUB_CONTROL1		0x158
+#define PCIE20_PARF_DBI_BASE_ADDR	0x168
+#define PCIE20_PARF_SLV_ADDR_SPACE_SIZE	0x16C
+
+#define TX_BASE 0x1000
+#define RX_BASE 0x1200
+#define PCS_BASE 0x1400
+#define PCS_MISC_BASE 0
+#endif
+
+#define TX(n, m) (TX_BASE + n * m * 0x1000)
+#define RX(n, m) (RX_BASE + n * m * 0x1000)
+#define PCS_PORT(n, m) (PCS_BASE + n * m * 0x1000)
+#define PCS_MISC_PORT(n, m) (PCS_MISC_BASE + n * m * 0x1000)
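+
+/*
+ * Illustrative expansion, assuming n is the RC (port) index and m is 1
+ * when a common PHY is used: PCS_PORT(0, 1) == PCS_BASE and
+ * PCS_PORT(1, 1) == PCS_BASE + 0x1000.
+ */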
+
+#define QSERDES_COM_BG_TIMER			0x00C
+#define QSERDES_COM_SSC_EN_CENTER		0x010
+#define QSERDES_COM_SSC_ADJ_PER1		0x014
+#define QSERDES_COM_SSC_ADJ_PER2		0x018
+#define QSERDES_COM_SSC_PER1			0x01C
+#define QSERDES_COM_SSC_PER2			0x020
+#define QSERDES_COM_SSC_STEP_SIZE1		0x024
+#define QSERDES_COM_SSC_STEP_SIZE2		0x028
+#define QSERDES_COM_BIAS_EN_CLKBUFLR_EN		0x034
+#define QSERDES_COM_CLK_ENABLE1			0x038
+#define QSERDES_COM_SYS_CLK_CTRL		0x03C
+#define QSERDES_COM_SYSCLK_BUF_ENABLE		0x040
+#define QSERDES_COM_PLL_IVCO			0x048
+#define QSERDES_COM_LOCK_CMP1_MODE0		0x04C
+#define QSERDES_COM_LOCK_CMP2_MODE0		0x050
+#define QSERDES_COM_LOCK_CMP3_MODE0		0x054
+#define QSERDES_COM_BG_TRIM			0x070
+#define QSERDES_COM_CLK_EP_DIV			0x074
+#define QSERDES_COM_CP_CTRL_MODE0		0x078
+#define QSERDES_COM_PLL_RCTRL_MODE0		0x084
+#define QSERDES_COM_PLL_CCTRL_MODE0		0x090
+#define QSERDES_COM_SYSCLK_EN_SEL		0x0AC
+#define QSERDES_COM_RESETSM_CNTRL		0x0B4
+#define QSERDES_COM_RESTRIM_CTRL		0x0BC
+#define QSERDES_COM_RESCODE_DIV_NUM		0x0C4
+#define QSERDES_COM_LOCK_CMP_EN			0x0C8
+#define QSERDES_COM_DEC_START_MODE0		0x0D0
+#define QSERDES_COM_DIV_FRAC_START1_MODE0	0x0DC
+#define QSERDES_COM_DIV_FRAC_START2_MODE0	0x0E0
+#define QSERDES_COM_DIV_FRAC_START3_MODE0	0x0E4
+#define QSERDES_COM_INTEGLOOP_GAIN0_MODE0	0x108
+#define QSERDES_COM_INTEGLOOP_GAIN1_MODE0	0x10C
+#define QSERDES_COM_VCO_TUNE_CTRL		0x124
+#define QSERDES_COM_VCO_TUNE_MAP		0x128
+#define QSERDES_COM_VCO_TUNE1_MODE0		0x12C
+#define QSERDES_COM_VCO_TUNE2_MODE0		0x130
+#define QSERDES_COM_VCO_TUNE_TIMER1		0x144
+#define QSERDES_COM_VCO_TUNE_TIMER2		0x148
+#define QSERDES_COM_BG_CTRL			0x170
+#define QSERDES_COM_CLK_SELECT			0x174
+#define QSERDES_COM_HSCLK_SEL			0x178
+#define QSERDES_COM_CORECLK_DIV			0x184
+#define QSERDES_COM_CORE_CLK_EN			0x18C
+#define QSERDES_COM_C_READY_STATUS		0x190
+#define QSERDES_COM_CMN_CONFIG			0x194
+#define QSERDES_COM_SVS_MODE_CLK_SEL		0x19C
+#define QSERDES_COM_DEBUG_BUS0			0x1A0
+#define QSERDES_COM_DEBUG_BUS1			0x1A4
+#define QSERDES_COM_DEBUG_BUS2			0x1A8
+#define QSERDES_COM_DEBUG_BUS3			0x1AC
+#define QSERDES_COM_DEBUG_BUS_SEL		0x1B0
+
+#define QSERDES_TX_N_RES_CODE_LANE_OFFSET(n, m)		(TX(n, m) + 0x4C)
+#define QSERDES_TX_N_DEBUG_BUS_SEL(n, m)		(TX(n, m) + 0x64)
+#define QSERDES_TX_N_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN(n, m) (TX(n, m) + 0x68)
+#define QSERDES_TX_N_LANE_MODE(n, m)			(TX(n, m) + 0x94)
+#define QSERDES_TX_N_RCV_DETECT_LVL_2(n, m)		(TX(n, m) + 0xAC)
+
+#define QSERDES_RX_N_UCDR_SO_GAIN_HALF(n, m)		(RX(n, m) + 0x010)
+#define QSERDES_RX_N_UCDR_SO_GAIN(n, m)			(RX(n, m) + 0x01C)
+#define QSERDES_RX_N_UCDR_SO_SATURATION_AND_ENABLE(n, m) (RX(n, m) + 0x048)
+#define QSERDES_RX_N_RX_EQU_ADAPTOR_CNTRL2(n, m)	(RX(n, m) + 0x0D8)
+#define QSERDES_RX_N_RX_EQU_ADAPTOR_CNTRL3(n, m)	(RX(n, m) + 0x0DC)
+#define QSERDES_RX_N_RX_EQU_ADAPTOR_CNTRL4(n, m)	(RX(n, m) + 0x0E0)
+#define QSERDES_RX_N_SIGDET_ENABLES(n, m)		(RX(n, m) + 0x110)
+#define QSERDES_RX_N_SIGDET_DEGLITCH_CNTRL(n, m)	(RX(n, m) + 0x11C)
+#define QSERDES_RX_N_SIGDET_LVL(n, m)			(RX(n, m) + 0x118)
+#define QSERDES_RX_N_RX_BAND(n, m)			(RX(n, m) + 0x120)
+
+#define PCIE_MISC_N_DEBUG_BUS_BYTE0_INDEX(n, m)	(PCS_MISC_PORT(n, m) + 0x00)
+#define PCIE_MISC_N_DEBUG_BUS_BYTE1_INDEX(n, m)	(PCS_MISC_PORT(n, m) + 0x04)
+#define PCIE_MISC_N_DEBUG_BUS_BYTE2_INDEX(n, m)	(PCS_MISC_PORT(n, m) + 0x08)
+#define PCIE_MISC_N_DEBUG_BUS_BYTE3_INDEX(n, m)	(PCS_MISC_PORT(n, m) + 0x0C)
+#define PCIE_MISC_N_DEBUG_BUS_0_STATUS(n, m)	(PCS_MISC_PORT(n, m) + 0x14)
+#define PCIE_MISC_N_DEBUG_BUS_1_STATUS(n, m)	(PCS_MISC_PORT(n, m) + 0x18)
+#define PCIE_MISC_N_DEBUG_BUS_2_STATUS(n, m)	(PCS_MISC_PORT(n, m) + 0x1C)
+#define PCIE_MISC_N_DEBUG_BUS_3_STATUS(n, m)	(PCS_MISC_PORT(n, m) + 0x20)
+
+#define PCIE_N_SW_RESET(n, m)			(PCS_PORT(n, m) + 0x00)
+#define PCIE_N_POWER_DOWN_CONTROL(n, m)		(PCS_PORT(n, m) + 0x04)
+#define PCIE_N_START_CONTROL(n, m)		(PCS_PORT(n, m) + 0x08)
+#define PCIE_N_TXDEEMPH_M6DB_V0(n, m)		(PCS_PORT(n, m) + 0x24)
+#define PCIE_N_TXDEEMPH_M3P5DB_V0(n, m)		(PCS_PORT(n, m) + 0x28)
+#define PCIE_N_ENDPOINT_REFCLK_DRIVE(n, m)	(PCS_PORT(n, m) + 0x54)
+#define PCIE_N_RX_IDLE_DTCT_CNTRL(n, m)		(PCS_PORT(n, m) + 0x58)
+#define PCIE_N_POWER_STATE_CONFIG1(n, m)	(PCS_PORT(n, m) + 0x60)
+#define PCIE_N_POWER_STATE_CONFIG4(n, m)	(PCS_PORT(n, m) + 0x6C)
+#define PCIE_N_PWRUP_RESET_DLY_TIME_AUXCLK(n, m)	(PCS_PORT(n, m) + 0xA0)
+#define PCIE_N_LP_WAKEUP_DLY_TIME_AUXCLK(n, m)	(PCS_PORT(n, m) + 0xA4)
+#define PCIE_N_PLL_LOCK_CHK_DLY_TIME(n, m)	(PCS_PORT(n, m) + 0xA8)
+#define PCIE_N_TEST_CONTROL4(n, m)		(PCS_PORT(n, m) + 0x11C)
+#define PCIE_N_TEST_CONTROL5(n, m)		(PCS_PORT(n, m) + 0x120)
+#define PCIE_N_TEST_CONTROL6(n, m)		(PCS_PORT(n, m) + 0x124)
+#define PCIE_N_TEST_CONTROL7(n, m)		(PCS_PORT(n, m) + 0x128)
+#define PCIE_N_PCS_STATUS(n, m)			(PCS_PORT(n, m) + 0x174)
+#define PCIE_N_DEBUG_BUS_0_STATUS(n, m)		(PCS_PORT(n, m) + 0x198)
+#define PCIE_N_DEBUG_BUS_1_STATUS(n, m)		(PCS_PORT(n, m) + 0x19C)
+#define PCIE_N_DEBUG_BUS_2_STATUS(n, m)		(PCS_PORT(n, m) + 0x1A0)
+#define PCIE_N_DEBUG_BUS_3_STATUS(n, m)		(PCS_PORT(n, m) + 0x1A4)
+#define PCIE_N_LP_WAKEUP_DLY_TIME_AUXCLK_MSB(n, m)	(PCS_PORT(n, m) + 0x1A8)
+#define PCIE_N_OSC_DTCT_ACTIONS(n, m)			(PCS_PORT(n, m) + 0x1AC)
+#define PCIE_N_SIGDET_CNTRL(n, m)			(PCS_PORT(n, m) + 0x1B0)
+#define PCIE_N_L1SS_WAKEUP_DLY_TIME_AUXCLK_LSB(n, m)	(PCS_PORT(n, m) + 0x1DC)
+#define PCIE_N_L1SS_WAKEUP_DLY_TIME_AUXCLK_MSB(n, m)	(PCS_PORT(n, m) + 0x1E0)
+
+#define PCIE_COM_SW_RESET		0x400
+#define PCIE_COM_POWER_DOWN_CONTROL	0x404
+#define PCIE_COM_START_CONTROL		0x408
+#define PCIE_COM_DEBUG_BUS_BYTE0_INDEX	0x438
+#define PCIE_COM_DEBUG_BUS_BYTE1_INDEX	0x43C
+#define PCIE_COM_DEBUG_BUS_BYTE2_INDEX	0x440
+#define PCIE_COM_DEBUG_BUS_BYTE3_INDEX	0x444
+#define PCIE_COM_PCS_READY_STATUS	0x448
+#define PCIE_COM_DEBUG_BUS_0_STATUS	0x45C
+#define PCIE_COM_DEBUG_BUS_1_STATUS	0x460
+#define PCIE_COM_DEBUG_BUS_2_STATUS	0x464
+#define PCIE_COM_DEBUG_BUS_3_STATUS	0x468
+
+#define PCIE20_PARF_SYS_CTRL	     0x00
+#define PCIE20_PARF_PM_STTS		0x24
+#define PCIE20_PARF_PCS_DEEMPH	   0x34
+#define PCIE20_PARF_PCS_SWING	    0x38
+#define PCIE20_PARF_PHY_CTRL	     0x40
+#define PCIE20_PARF_PHY_REFCLK	   0x4C
+#define PCIE20_PARF_CONFIG_BITS	  0x50
+#define PCIE20_PARF_TEST_BUS		0xE4
+#define PCIE20_PARF_MHI_CLOCK_RESET_CTRL	0x174
+#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT   0x1A8
+#define PCIE20_PARF_LTSSM              0x1B0
+#define PCIE20_PARF_INT_ALL_STATUS	0x224
+#define PCIE20_PARF_INT_ALL_CLEAR	0x228
+#define PCIE20_PARF_INT_ALL_MASK	0x22C
+#define PCIE20_PARF_SID_OFFSET		0x234
+#define PCIE20_PARF_BDF_TRANSLATE_CFG	0x24C
+#define PCIE20_PARF_BDF_TRANSLATE_N	0x250
+
+#define PCIE20_ELBI_VERSION		0x00
+#define PCIE20_ELBI_SYS_CTRL	     0x04
+#define PCIE20_ELBI_SYS_STTS		 0x08
+
+#define PCIE20_CAP			   0x70
+#define PCIE20_CAP_DEVCTRLSTATUS	(PCIE20_CAP + 0x08)
+#define PCIE20_CAP_LINKCTRLSTATUS	(PCIE20_CAP + 0x10)
+
+#define PCIE20_COMMAND_STATUS	    0x04
+#define PCIE20_HEADER_TYPE		0x0C
+#define PCIE20_BUSNUMBERS		  0x18
+#define PCIE20_MEMORY_BASE_LIMIT	 0x20
+#define PCIE20_BRIDGE_CTRL		0x3C
+#define PCIE20_DEVICE_CONTROL_STATUS	0x78
+#define PCIE20_DEVICE_CONTROL2_STATUS2 0x98
+
+#define PCIE20_AUX_CLK_FREQ_REG		0xB40
+#define PCIE20_ACK_F_ASPM_CTRL_REG     0x70C
+#define PCIE20_ACK_N_FTS		   0xff00
+
+#define PCIE20_PLR_IATU_VIEWPORT	 0x900
+#define PCIE20_PLR_IATU_CTRL1	    0x904
+#define PCIE20_PLR_IATU_CTRL2	    0x908
+#define PCIE20_PLR_IATU_LBAR	     0x90C
+#define PCIE20_PLR_IATU_UBAR	     0x910
+#define PCIE20_PLR_IATU_LAR		0x914
+#define PCIE20_PLR_IATU_LTAR	     0x918
+#define PCIE20_PLR_IATU_UTAR	     0x91c
+
+#define PCIE20_CTRL1_TYPE_CFG0		0x04
+#define PCIE20_CTRL1_TYPE_CFG1		0x05
+
+#define PCIE20_CAP_ID			0x10
+#define L1SUB_CAP_ID			0x1E
+
+#define PCIE_CAP_PTR_OFFSET		0x34
+#define PCIE_EXT_CAP_OFFSET		0x100
+
+#define PCIE20_AER_UNCORR_ERR_STATUS_REG	0x104
+#define PCIE20_AER_CORR_ERR_STATUS_REG		0x110
+#define PCIE20_AER_ROOT_ERR_STATUS_REG		0x130
+#define PCIE20_AER_ERR_SRC_ID_REG		0x134
+
+#define RD 0
+#define WR 1
+#define MSM_PCIE_ERROR (-1)
+
+#define PERST_PROPAGATION_DELAY_US_MIN	  1000
+#define PERST_PROPAGATION_DELAY_US_MAX	  1005
+#define REFCLK_STABILIZATION_DELAY_US_MIN     1000
+#define REFCLK_STABILIZATION_DELAY_US_MAX     1005
+#define LINK_UP_TIMEOUT_US_MIN		    5000
+#define LINK_UP_TIMEOUT_US_MAX		    5100
+#define LINK_UP_CHECK_MAX_COUNT		   20
+#define PHY_STABILIZATION_DELAY_US_MIN	  995
+#define PHY_STABILIZATION_DELAY_US_MAX	  1005
+#define POWER_DOWN_DELAY_US_MIN		10
+#define POWER_DOWN_DELAY_US_MAX		11
+#define LINKDOWN_INIT_WAITING_US_MIN    995
+#define LINKDOWN_INIT_WAITING_US_MAX    1005
+#define LINKDOWN_WAITING_US_MIN	   4900
+#define LINKDOWN_WAITING_US_MAX	   5100
+#define LINKDOWN_WAITING_COUNT	    200
+
+#define PHY_READY_TIMEOUT_COUNT		   10
+#define XMLH_LINK_UP				  0x400
+#define MAX_LINK_RETRIES 5
+#define MAX_BUS_NUM 3
+#define MAX_PROP_SIZE 32
+#define MAX_RC_NAME_LEN 15
+#define MSM_PCIE_MAX_VREG 4
+#define MSM_PCIE_MAX_CLK 9
+#define MSM_PCIE_MAX_PIPE_CLK 1
+#define MAX_RC_NUM 3
+#define MAX_DEVICE_NUM 20
+#define MAX_SHORT_BDF_NUM 16
+#define PCIE_TLP_RD_SIZE 0x5
+#define PCIE_MSI_NR_IRQS 256
+#define MSM_PCIE_MAX_MSI 32
+#define MAX_MSG_LEN 80
+#define PCIE_LOG_PAGES (50)
+#define PCIE_CONF_SPACE_DW			1024
+#define PCIE_CLEAR				0xDEADBEEF
+#define PCIE_LINK_DOWN				0xFFFFFFFF
+
+#define MSM_PCIE_MAX_RESET 4
+#define MSM_PCIE_MAX_PIPE_RESET 1
+
+#define MSM_PCIE_MSI_PHY 0xa0000000
+#define PCIE20_MSI_CTRL_ADDR		(0x820)
+#define PCIE20_MSI_CTRL_UPPER_ADDR	(0x824)
+#define PCIE20_MSI_CTRL_INTR_EN	   (0x828)
+#define PCIE20_MSI_CTRL_INTR_MASK	 (0x82C)
+#define PCIE20_MSI_CTRL_INTR_STATUS     (0x830)
+#define PCIE20_MSI_CTRL_MAX 8
+
+/* PM control options */
+#define PM_IRQ			 0x1
+#define PM_CLK			 0x2
+#define PM_GPIO			0x4
+#define PM_VREG			0x8
+#define PM_PIPE_CLK		  0x10
+#define PM_ALL (PM_IRQ | PM_CLK | PM_GPIO | PM_VREG | PM_PIPE_CLK)
+
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+#define PCIE_UPPER_ADDR(addr) ((u32)((addr) >> 32))
+#else
+#define PCIE_UPPER_ADDR(addr) (0x0)
+#endif
+#define PCIE_LOWER_ADDR(addr) ((u32)((addr) & 0xffffffff))
+
+/* Config Space Offsets */
+#define BDF_OFFSET(bus, devfn) \
+	((bus << 24) | (devfn << 16))
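+/*
+ * Example: bus 0x01, devfn 0x00 gives BDF_OFFSET(0x01, 0x00) ==
+ * 0x01000000; the driver programs this value as the iATU target when
+ * generating configuration accesses to that device.
+ */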
+
+#define PCIE_GEN_DBG(x...) do { \
+	if (msm_pcie_debug_mask) \
+		pr_alert(x); \
+	} while (0)
+
+#define PCIE_DBG(dev, fmt, arg...) do {			 \
+	if ((dev) && (dev)->ipc_log_long)   \
+		ipc_log_string((dev)->ipc_log_long, \
+			"DBG1:%s: " fmt, __func__, arg); \
+	if ((dev) && (dev)->ipc_log)   \
+		ipc_log_string((dev)->ipc_log, "%s: " fmt, __func__, arg); \
+	if (msm_pcie_debug_mask)   \
+		pr_alert("%s: " fmt, __func__, arg);		  \
+	} while (0)
+
+#define PCIE_DBG2(dev, fmt, arg...) do {			 \
+	if ((dev) && (dev)->ipc_log)   \
+		ipc_log_string((dev)->ipc_log, "DBG2:%s: " fmt, __func__, arg);\
+	if (msm_pcie_debug_mask)   \
+		pr_alert("%s: " fmt, __func__, arg);              \
+	} while (0)
+
+#define PCIE_DBG3(dev, fmt, arg...) do {			 \
+	if ((dev) && (dev)->ipc_log)   \
+		ipc_log_string((dev)->ipc_log, "DBG3:%s: " fmt, __func__, arg);\
+	if (msm_pcie_debug_mask)   \
+		pr_alert("%s: " fmt, __func__, arg);              \
+	} while (0)
+
+#define PCIE_DUMP(dev, fmt, arg...) do {			\
+	if ((dev) && (dev)->ipc_log_dump) \
+		ipc_log_string((dev)->ipc_log_dump, \
+			"DUMP:%s: " fmt, __func__, arg); \
+	} while (0)
+
+#define PCIE_DBG_FS(dev, fmt, arg...) do {			\
+	if ((dev) && (dev)->ipc_log_dump) \
+		ipc_log_string((dev)->ipc_log_dump, \
+			"DBG_FS:%s: " fmt, __func__, arg); \
+	pr_alert("%s: " fmt, __func__, arg); \
+	} while (0)
+
+#define PCIE_INFO(dev, fmt, arg...) do {			 \
+	if ((dev) && (dev)->ipc_log_long)   \
+		ipc_log_string((dev)->ipc_log_long, \
+			"INFO:%s: " fmt, __func__, arg); \
+	if ((dev) && (dev)->ipc_log)   \
+		ipc_log_string((dev)->ipc_log, "%s: " fmt, __func__, arg); \
+	pr_info("%s: " fmt, __func__, arg);  \
+	} while (0)
+
+#define PCIE_ERR(dev, fmt, arg...) do {			 \
+	if ((dev) && (dev)->ipc_log_long)   \
+		ipc_log_string((dev)->ipc_log_long, \
+			"ERR:%s: " fmt, __func__, arg); \
+	if ((dev) && (dev)->ipc_log)   \
+		ipc_log_string((dev)->ipc_log, "%s: " fmt, __func__, arg); \
+	pr_err("%s: " fmt, __func__, arg);  \
+	} while (0)
+
+
+enum msm_pcie_res {
+	MSM_PCIE_RES_PARF,
+	MSM_PCIE_RES_PHY,
+	MSM_PCIE_RES_DM_CORE,
+	MSM_PCIE_RES_ELBI,
+	MSM_PCIE_RES_CONF,
+	MSM_PCIE_RES_IO,
+	MSM_PCIE_RES_BARS,
+	MSM_PCIE_RES_TCSR,
+	MSM_PCIE_MAX_RES,
+};
+
+enum msm_pcie_irq {
+	MSM_PCIE_INT_MSI,
+	MSM_PCIE_INT_A,
+	MSM_PCIE_INT_B,
+	MSM_PCIE_INT_C,
+	MSM_PCIE_INT_D,
+	MSM_PCIE_INT_PLS_PME,
+	MSM_PCIE_INT_PME_LEGACY,
+	MSM_PCIE_INT_PLS_ERR,
+	MSM_PCIE_INT_AER_LEGACY,
+	MSM_PCIE_INT_LINK_UP,
+	MSM_PCIE_INT_LINK_DOWN,
+	MSM_PCIE_INT_BRIDGE_FLUSH_N,
+	MSM_PCIE_INT_GLOBAL_INT,
+	MSM_PCIE_MAX_IRQ,
+};
+
+enum msm_pcie_irq_event {
+	MSM_PCIE_INT_EVT_LINK_DOWN = 1,
+	MSM_PCIE_INT_EVT_BME,
+	MSM_PCIE_INT_EVT_PM_TURNOFF,
+	MSM_PCIE_INT_EVT_DEBUG,
+	MSM_PCIE_INT_EVT_LTR,
+	MSM_PCIE_INT_EVT_MHI_Q6,
+	MSM_PCIE_INT_EVT_MHI_A7,
+	MSM_PCIE_INT_EVT_DSTATE_CHANGE,
+	MSM_PCIE_INT_EVT_L1SUB_TIMEOUT,
+	MSM_PCIE_INT_EVT_MMIO_WRITE,
+	MSM_PCIE_INT_EVT_CFG_WRITE,
+	MSM_PCIE_INT_EVT_BRIDGE_FLUSH_N,
+	MSM_PCIE_INT_EVT_LINK_UP,
+	MSM_PCIE_INT_EVT_AER_LEGACY,
+	MSM_PCIE_INT_EVT_AER_ERR,
+	MSM_PCIE_INT_EVT_PME_LEGACY,
+	MSM_PCIE_INT_EVT_PLS_PME,
+	MSM_PCIE_INT_EVT_INTD,
+	MSM_PCIE_INT_EVT_INTC,
+	MSM_PCIE_INT_EVT_INTB,
+	MSM_PCIE_INT_EVT_INTA,
+	MSM_PCIE_INT_EVT_EDMA,
+	MSM_PCIE_INT_EVT_MSI_0,
+	MSM_PCIE_INT_EVT_MSI_1,
+	MSM_PCIE_INT_EVT_MSI_2,
+	MSM_PCIE_INT_EVT_MSI_3,
+	MSM_PCIE_INT_EVT_MSI_4,
+	MSM_PCIE_INT_EVT_MSI_5,
+	MSM_PCIE_INT_EVT_MSI_6,
+	MSM_PCIE_INT_EVT_MSI_7,
+	MSM_PCIE_INT_EVT_MAX = 30,
+};
+
+enum msm_pcie_gpio {
+	MSM_PCIE_GPIO_PERST,
+	MSM_PCIE_GPIO_WAKE,
+	MSM_PCIE_GPIO_EP,
+	MSM_PCIE_MAX_GPIO
+};
+
+enum msm_pcie_link_status {
+	MSM_PCIE_LINK_DEINIT,
+	MSM_PCIE_LINK_ENABLED,
+	MSM_PCIE_LINK_DISABLED
+};
+
+/* gpio info structure */
+struct msm_pcie_gpio_info_t {
+	char	*name;
+	uint32_t   num;
+	bool	 out;
+	uint32_t   on;
+	uint32_t   init;
+	bool	required;
+};
+
+/* voltage regulator info structure */
+struct msm_pcie_vreg_info_t {
+	struct regulator  *hdl;
+	char		  *name;
+	uint32_t	     max_v;
+	uint32_t	     min_v;
+	uint32_t	     opt_mode;
+	bool		   required;
+};
+
+/* reset info structure */
+struct msm_pcie_reset_info_t {
+	struct reset_control *hdl;
+	char *name;
+	bool required;
+};
+
+/* clock info structure */
+struct msm_pcie_clk_info_t {
+	struct clk  *hdl;
+	char	  *name;
+	u32	   freq;
+	bool	config_mem;
+	bool	  required;
+};
+
+/* resource info structure */
+struct msm_pcie_res_info_t {
+	char		*name;
+	struct resource *resource;
+	void __iomem    *base;
+};
+
+/* irq info structure */
+struct msm_pcie_irq_info_t {
+	char		  *name;
+	uint32_t	    num;
+};
+
+/* phy info structure */
+struct msm_pcie_phy_info_t {
+	u32	offset;
+	u32	val;
+	u32	delay;
+};
+
+/* PCIe device info structure */
+struct msm_pcie_device_info {
+	u32			bdf;
+	struct pci_dev		*dev;
+	short			short_bdf;
+	u32			sid;
+	int			domain;
+	void __iomem		*conf_base;
+	unsigned long		phy_address;
+	u32			dev_ctrlstts_offset;
+	struct msm_pcie_register_event *event_reg;
+	bool			registered;
+};
+
+/* msm pcie device structure */
+struct msm_pcie_dev_t {
+	struct platform_device	 *pdev;
+	struct pci_dev *dev;
+	struct regulator *gdsc;
+	struct regulator *gdsc_smmu;
+	struct msm_pcie_vreg_info_t  vreg[MSM_PCIE_MAX_VREG];
+	struct msm_pcie_gpio_info_t  gpio[MSM_PCIE_MAX_GPIO];
+	struct msm_pcie_clk_info_t   clk[MSM_PCIE_MAX_CLK];
+	struct msm_pcie_clk_info_t   pipeclk[MSM_PCIE_MAX_PIPE_CLK];
+	struct msm_pcie_res_info_t   res[MSM_PCIE_MAX_RES];
+	struct msm_pcie_irq_info_t   irq[MSM_PCIE_MAX_IRQ];
+	struct msm_pcie_irq_info_t	msi[MSM_PCIE_MAX_MSI];
+	struct msm_pcie_reset_info_t reset[MSM_PCIE_MAX_RESET];
+	struct msm_pcie_reset_info_t pipe_reset[MSM_PCIE_MAX_PIPE_RESET];
+
+	void __iomem		     *parf;
+	void __iomem		     *phy;
+	void __iomem		     *elbi;
+	void __iomem		     *dm_core;
+	void __iomem		     *conf;
+	void __iomem		     *bars;
+	void __iomem		     *tcsr;
+
+	uint32_t			    axi_bar_start;
+	uint32_t			    axi_bar_end;
+
+	struct resource		   *dev_mem_res;
+	struct resource		   *dev_io_res;
+
+	uint32_t			    wake_n;
+	uint32_t			    vreg_n;
+	uint32_t			    gpio_n;
+	uint32_t			    parf_deemph;
+	uint32_t			    parf_swing;
+
+	bool				 cfg_access;
+	spinlock_t			 cfg_lock;
+	unsigned long		    irqsave_flags;
+	struct mutex			enumerate_lock;
+	struct mutex		     setup_lock;
+
+	struct irq_domain		*irq_domain;
+	DECLARE_BITMAP(msi_irq_in_use, PCIE_MSI_NR_IRQS);
+	uint32_t			   msi_gicm_addr;
+	uint32_t			   msi_gicm_base;
+	bool				 use_msi;
+
+	enum msm_pcie_link_status    link_status;
+	bool				 user_suspend;
+	bool                         disable_pc;
+	struct pci_saved_state	     *saved_state;
+
+	struct wakeup_source	     ws;
+	struct msm_bus_scale_pdata   *bus_scale_table;
+	uint32_t			   bus_client;
+
+	bool				l0s_supported;
+	bool				l1_supported;
+	bool				 l1ss_supported;
+	bool				common_clk_en;
+	bool				clk_power_manage_en;
+	bool				 aux_clk_sync;
+	bool				aer_enable;
+	bool				smmu_exist;
+	uint32_t			smmu_sid_base;
+	uint32_t			   n_fts;
+	bool				 ext_ref_clk;
+	bool				common_phy;
+	uint32_t			   ep_latency;
+	uint32_t			wr_halt_size;
+	uint32_t			cpl_timeout;
+	uint32_t			current_bdf;
+	short				current_short_bdf;
+	uint32_t			perst_delay_us_min;
+	uint32_t			perst_delay_us_max;
+	uint32_t			tlp_rd_size;
+	bool				linkdown_panic;
+	bool				 ep_wakeirq;
+
+	uint32_t			   rc_idx;
+	uint32_t			phy_ver;
+	bool				drv_ready;
+	bool				 enumerated;
+	struct work_struct	     handle_wake_work;
+	struct mutex		     recovery_lock;
+	spinlock_t                   linkdown_lock;
+	spinlock_t                   wakeup_lock;
+	spinlock_t			global_irq_lock;
+	spinlock_t			aer_lock;
+	ulong				linkdown_counter;
+	ulong				link_turned_on_counter;
+	ulong				link_turned_off_counter;
+	ulong				rc_corr_counter;
+	ulong				rc_non_fatal_counter;
+	ulong				rc_fatal_counter;
+	ulong				ep_corr_counter;
+	ulong				ep_non_fatal_counter;
+	ulong				ep_fatal_counter;
+	bool				 suspending;
+	ulong				wake_counter;
+	u32				num_active_ep;
+	u32				num_ep;
+	bool				pending_ep_reg;
+	u32				phy_len;
+	u32				port_phy_len;
+	struct msm_pcie_phy_info_t	*phy_sequence;
+	struct msm_pcie_phy_info_t	*port_phy_sequence;
+	u32		ep_shadow[MAX_DEVICE_NUM][PCIE_CONF_SPACE_DW];
+	u32				  rc_shadow[PCIE_CONF_SPACE_DW];
+	bool				 shadow_en;
+	bool				bridge_found;
+	struct msm_pcie_register_event *event_reg;
+	unsigned int			scm_dev_id;
+	bool				 power_on;
+	void				 *ipc_log;
+	void				*ipc_log_long;
+	void				*ipc_log_dump;
+	bool				use_19p2mhz_aux_clk;
+	bool				use_pinctrl;
+	struct pinctrl			*pinctrl;
+	struct pinctrl_state		*pins_default;
+	struct pinctrl_state		*pins_sleep;
+	struct msm_pcie_device_info   pcidev_table[MAX_DEVICE_NUM];
+};
+
+
+/* debug mask sys interface */
+static int msm_pcie_debug_mask;
+module_param_named(debug_mask, msm_pcie_debug_mask,
+			    int, 0644);
+
+/* debugfs values */
+static u32 rc_sel;
+static u32 base_sel;
+static u32 wr_offset;
+static u32 wr_mask;
+static u32 wr_value;
+static ulong corr_counter_limit = 5;
+
+/* counter to keep track if common PHY needs to be configured */
+static u32 num_rc_on;
+
+/* global lock for PCIe common PHY */
+static struct mutex com_phy_lock;
+
+/* Table to track info of PCIe devices */
+static struct msm_pcie_device_info
+	msm_pcie_dev_tbl[MAX_RC_NUM * MAX_DEVICE_NUM];
+
+/* PCIe driver state */
+struct pcie_drv_sta {
+	u32 rc_num;
+	struct mutex drv_lock;
+} pcie_drv;
+
+/* msm pcie device data */
+static struct msm_pcie_dev_t msm_pcie_dev[MAX_RC_NUM];
+
+/* regulators */
+static struct msm_pcie_vreg_info_t msm_pcie_vreg_info[MSM_PCIE_MAX_VREG] = {
+	{NULL, "vreg-3.3", 0, 0, 0, false},
+	{NULL, "vreg-1.8", 1800000, 1800000, 14000, true},
+	{NULL, "vreg-0.9", 1000000, 1000000, 40000, true},
+	{NULL, "vreg-cx", 0, 0, 0, false}
+};
+
+/* GPIOs */
+static struct msm_pcie_gpio_info_t msm_pcie_gpio_info[MSM_PCIE_MAX_GPIO] = {
+	{"perst-gpio",		0, 1, 0, 0, 1},
+	{"wake-gpio",		0, 0, 0, 0, 0},
+	{"qcom,ep-gpio",	0, 1, 1, 0, 0}
+};
+
+/* resets */
+static struct msm_pcie_reset_info_t
+msm_pcie_reset_info[MAX_RC_NUM][MSM_PCIE_MAX_RESET] = {
+	{
+		{NULL, "pcie_phy_reset", false},
+		{NULL, "pcie_phy_com_reset", false},
+		{NULL, "pcie_phy_nocsr_com_phy_reset", false},
+		{NULL, "pcie_0_phy_reset", false}
+	},
+	{
+		{NULL, "pcie_phy_reset", false},
+		{NULL, "pcie_phy_com_reset", false},
+		{NULL, "pcie_phy_nocsr_com_phy_reset", false},
+		{NULL, "pcie_1_phy_reset", false}
+	},
+	{
+		{NULL, "pcie_phy_reset", false},
+		{NULL, "pcie_phy_com_reset", false},
+		{NULL, "pcie_phy_nocsr_com_phy_reset", false},
+		{NULL, "pcie_2_phy_reset", false}
+	}
+};
+
+/* pipe reset  */
+static struct msm_pcie_reset_info_t
+msm_pcie_pipe_reset_info[MAX_RC_NUM][MSM_PCIE_MAX_PIPE_RESET] = {
+	{
+		{NULL, "pcie_0_phy_pipe_reset", false}
+	},
+	{
+		{NULL, "pcie_1_phy_pipe_reset", false}
+	},
+	{
+		{NULL, "pcie_2_phy_pipe_reset", false}
+	}
+};
+
+/* clocks */
+static struct msm_pcie_clk_info_t
+	msm_pcie_clk_info[MAX_RC_NUM][MSM_PCIE_MAX_CLK] = {
+	{
+	{NULL, "pcie_0_ref_clk_src", 0, false, false},
+	{NULL, "pcie_0_aux_clk", 1010000, false, true},
+	{NULL, "pcie_0_cfg_ahb_clk", 0, false, true},
+	{NULL, "pcie_0_mstr_axi_clk", 0, true, true},
+	{NULL, "pcie_0_slv_axi_clk", 0, true, true},
+	{NULL, "pcie_0_ldo", 0, false, true},
+	{NULL, "pcie_0_smmu_clk", 0, false, false},
+	{NULL, "pcie_phy_cfg_ahb_clk", 0, false, false},
+	{NULL, "pcie_phy_aux_clk", 0, false, false}
+	},
+	{
+	{NULL, "pcie_1_ref_clk_src", 0, false, false},
+	{NULL, "pcie_1_aux_clk", 1010000, false, true},
+	{NULL, "pcie_1_cfg_ahb_clk", 0, false, true},
+	{NULL, "pcie_1_mstr_axi_clk", 0, true, true},
+	{NULL, "pcie_1_slv_axi_clk", 0, true,  true},
+	{NULL, "pcie_1_ldo", 0, false, true},
+	{NULL, "pcie_1_smmu_clk", 0, false, false},
+	{NULL, "pcie_phy_cfg_ahb_clk", 0, false, false},
+	{NULL, "pcie_phy_aux_clk", 0, false, false}
+	},
+	{
+	{NULL, "pcie_2_ref_clk_src", 0, false, false},
+	{NULL, "pcie_2_aux_clk", 1010000, false, true},
+	{NULL, "pcie_2_cfg_ahb_clk", 0, false, true},
+	{NULL, "pcie_2_mstr_axi_clk", 0, true, true},
+	{NULL, "pcie_2_slv_axi_clk", 0, true, true},
+	{NULL, "pcie_2_ldo", 0, false, true},
+	{NULL, "pcie_2_smmu_clk", 0, false, false},
+	{NULL, "pcie_phy_cfg_ahb_clk", 0, false, false},
+	{NULL, "pcie_phy_aux_clk", 0, false, false}
+	}
+};
+
+/* Pipe Clocks */
+static struct msm_pcie_clk_info_t
+	msm_pcie_pipe_clk_info[MAX_RC_NUM][MSM_PCIE_MAX_PIPE_CLK] = {
+	{
+	{NULL, "pcie_0_pipe_clk", 125000000, true, true},
+	},
+	{
+	{NULL, "pcie_1_pipe_clk", 125000000, true, true},
+	},
+	{
+	{NULL, "pcie_2_pipe_clk", 125000000, true, true},
+	}
+};
+
+/* resources */
+static const struct msm_pcie_res_info_t msm_pcie_res_info[MSM_PCIE_MAX_RES] = {
+	{"parf",	0, 0},
+	{"phy",     0, 0},
+	{"dm_core",	0, 0},
+	{"elbi",	0, 0},
+	{"conf",	0, 0},
+	{"io",		0, 0},
+	{"bars",	0, 0},
+	{"tcsr",	0, 0}
+};
+
+/* irqs */
+static const struct msm_pcie_irq_info_t msm_pcie_irq_info[MSM_PCIE_MAX_IRQ] = {
+	{"int_msi",	0},
+	{"int_a",	0},
+	{"int_b",	0},
+	{"int_c",	0},
+	{"int_d",	0},
+	{"int_pls_pme",		0},
+	{"int_pme_legacy",	0},
+	{"int_pls_err",		0},
+	{"int_aer_legacy",	0},
+	{"int_pls_link_up",	0},
+	{"int_pls_link_down",	0},
+	{"int_bridge_flush_n",	0},
+	{"int_global_int",	0}
+};
+
+/* MSIs */
+static const struct msm_pcie_irq_info_t msm_pcie_msi_info[MSM_PCIE_MAX_MSI] = {
+	{"msi_0", 0}, {"msi_1", 0}, {"msi_2", 0}, {"msi_3", 0},
+	{"msi_4", 0}, {"msi_5", 0}, {"msi_6", 0}, {"msi_7", 0},
+	{"msi_8", 0}, {"msi_9", 0}, {"msi_10", 0}, {"msi_11", 0},
+	{"msi_12", 0}, {"msi_13", 0}, {"msi_14", 0}, {"msi_15", 0},
+	{"msi_16", 0}, {"msi_17", 0}, {"msi_18", 0}, {"msi_19", 0},
+	{"msi_20", 0}, {"msi_21", 0}, {"msi_22", 0}, {"msi_23", 0},
+	{"msi_24", 0}, {"msi_25", 0}, {"msi_26", 0}, {"msi_27", 0},
+	{"msi_28", 0}, {"msi_29", 0}, {"msi_30", 0}, {"msi_31", 0}
+};
+
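+/*
+ * On ARM the bus sysdata is a struct pci_sys_data whose private_data holds
+ * the msm_pcie_dev_t; on other architectures bus->sysdata points at the
+ * msm_pcie_dev_t directly.
+ */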
+#ifdef CONFIG_ARM
+#define PCIE_BUS_PRIV_DATA(bus) \
+	(((struct pci_sys_data *)(bus)->sysdata)->private_data)
+
+static struct pci_sys_data msm_pcie_sys_data[MAX_RC_NUM];
+
+static inline void *msm_pcie_setup_sys_data(struct msm_pcie_dev_t *dev)
+{
+	msm_pcie_sys_data[dev->rc_idx].domain = dev->rc_idx;
+	msm_pcie_sys_data[dev->rc_idx].private_data = dev;
+
+	return &msm_pcie_sys_data[dev->rc_idx];
+}
+
+static inline void msm_pcie_fixup_irqs(struct msm_pcie_dev_t *dev)
+{
+	pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci);
+}
+#else
+#define PCIE_BUS_PRIV_DATA(bus) \
+	((struct msm_pcie_dev_t *)((bus)->sysdata))
+
+static inline void *msm_pcie_setup_sys_data(struct msm_pcie_dev_t *dev)
+{
+	return dev;
+}
+
+static inline void msm_pcie_fixup_irqs(struct msm_pcie_dev_t *dev)
+{
+}
+#endif
+
+static inline void msm_pcie_write_reg(void __iomem *base, u32 offset,
+	u32 value)
+{
+	writel_relaxed(value, base + offset);
+	/* ensure that changes propagate to the hardware */
+	wmb();
+}
+
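+/*
+ * Read-modify-write the register field selected by @mask; @val is the
+ * field value, placed at the mask's lowest set bit.
+ */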
+static inline void msm_pcie_write_reg_field(void __iomem *base, u32 offset,
+	const u32 mask, u32 val)
+{
+	unsigned long field = mask;
+	u32 shift = find_first_bit(&field, 32);
+	u32 tmp = readl_relaxed(base + offset);
+
+	tmp &= ~mask; /* clear the field */
+	val = tmp | ((val << shift) & mask);
+	writel_relaxed(val, base + offset);
+	/* ensure that changes propagate to the hardware */
+	wmb();
+}
+
+static inline void msm_pcie_config_clock_mem(struct msm_pcie_dev_t *dev,
+	struct msm_pcie_clk_info_t *info)
+{
+	int ret;
+
+	ret = clk_set_flags(info->hdl, CLKFLAG_NORETAIN_MEM);
+	if (ret)
+		PCIE_ERR(dev,
+			"PCIe: RC%d can't configure core memory for clk %s: %d.\n",
+			dev->rc_idx, info->name, ret);
+	else
+		PCIE_DBG2(dev,
+			"PCIe: RC%d configured core memory for clk %s.\n",
+			dev->rc_idx, info->name);
+
+	ret = clk_set_flags(info->hdl, CLKFLAG_NORETAIN_PERIPH);
+	if (ret)
+		PCIE_ERR(dev,
+			"PCIe: RC%d can't configure peripheral memory for clk %s: %d.\n",
+			dev->rc_idx, info->name, ret);
+	else
+		PCIE_DBG2(dev,
+			"PCIe: RC%d configured peripheral memory for clk %s.\n",
+			dev->rc_idx, info->name);
+}
+
+#if defined(CONFIG_ARCH_FSM9010)
+#define PCIE20_PARF_PHY_STTS         0x3c
+#define PCIE2_PHY_RESET_CTRL         0x44
+#define PCIE20_PARF_PHY_REFCLK_CTRL2 0xa0
+#define PCIE20_PARF_PHY_REFCLK_CTRL3 0xa4
+#define PCIE20_PARF_PCS_SWING_CTRL1  0x88
+#define PCIE20_PARF_PCS_SWING_CTRL2  0x8c
+#define PCIE20_PARF_PCS_DEEMPH1      0x74
+#define PCIE20_PARF_PCS_DEEMPH2      0x78
+#define PCIE20_PARF_PCS_DEEMPH3      0x7c
+#define PCIE20_PARF_CONFIGBITS       0x84
+#define PCIE20_PARF_PHY_CTRL3        0x94
+#define PCIE20_PARF_PCS_CTRL         0x80
+
+#define TX_AMP_VAL                   127
+#define PHY_RX0_EQ_GEN1_VAL          0
+#define PHY_RX0_EQ_GEN2_VAL          4
+#define TX_DEEMPH_GEN1_VAL           24
+#define TX_DEEMPH_GEN2_3_5DB_VAL     24
+#define TX_DEEMPH_GEN2_6DB_VAL       34
+#define PHY_TX0_TERM_OFFST_VAL       0
+
+static inline void pcie_phy_dump(struct msm_pcie_dev_t *dev)
+{
+}
+
+static inline void pcie20_phy_reset(struct msm_pcie_dev_t *dev, uint32_t assert)
+{
+	msm_pcie_write_reg_field(dev->phy, PCIE2_PHY_RESET_CTRL,
+					 BIT(0), (assert) ? 1 : 0);
+}
+
+static void pcie_phy_init(struct msm_pcie_dev_t *dev)
+{
+	PCIE_DBG(dev, "RC%d: Initializing 28LP SNS phy - 100MHz\n",
+		dev->rc_idx);
+
+	/* Assert Phy SW Reset */
+	pcie20_phy_reset(dev, 1);
+
+	/* Program SSP ENABLE */
+	if (readl_relaxed(dev->phy + PCIE20_PARF_PHY_REFCLK_CTRL2) & BIT(0))
+		msm_pcie_write_reg_field(dev->phy, PCIE20_PARF_PHY_REFCLK_CTRL2,
+								 BIT(0), 0);
+	if ((readl_relaxed(dev->phy + PCIE20_PARF_PHY_REFCLK_CTRL3) &
+								 BIT(0)) == 0)
+		msm_pcie_write_reg_field(dev->phy, PCIE20_PARF_PHY_REFCLK_CTRL3,
+								 BIT(0), 1);
+	/* Program Tx Amplitude */
+	if ((readl_relaxed(dev->phy + PCIE20_PARF_PCS_SWING_CTRL1) &
+		(BIT(6)|BIT(5)|BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0))) !=
+				TX_AMP_VAL)
+		msm_pcie_write_reg_field(dev->phy, PCIE20_PARF_PCS_SWING_CTRL1,
+			BIT(6)|BIT(5)|BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0),
+				TX_AMP_VAL);
+	if ((readl_relaxed(dev->phy + PCIE20_PARF_PCS_SWING_CTRL2) &
+		(BIT(6)|BIT(5)|BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0))) !=
+				TX_AMP_VAL)
+		msm_pcie_write_reg_field(dev->phy, PCIE20_PARF_PCS_SWING_CTRL2,
+			BIT(6)|BIT(5)|BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0),
+				TX_AMP_VAL);
+	/* Program De-Emphasis */
+	if ((readl_relaxed(dev->phy + PCIE20_PARF_PCS_DEEMPH1) &
+			(BIT(5)|BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0))) !=
+				TX_DEEMPH_GEN2_6DB_VAL)
+		msm_pcie_write_reg_field(dev->phy, PCIE20_PARF_PCS_DEEMPH1,
+			BIT(5)|BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0),
+				TX_DEEMPH_GEN2_6DB_VAL);
+
+	if ((readl_relaxed(dev->phy + PCIE20_PARF_PCS_DEEMPH2) &
+			(BIT(5)|BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0))) !=
+				TX_DEEMPH_GEN2_3_5DB_VAL)
+		msm_pcie_write_reg_field(dev->phy, PCIE20_PARF_PCS_DEEMPH2,
+			BIT(5)|BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0),
+				TX_DEEMPH_GEN2_3_5DB_VAL);
+
+	if ((readl_relaxed(dev->phy + PCIE20_PARF_PCS_DEEMPH3) &
+			(BIT(5)|BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0))) !=
+				TX_DEEMPH_GEN1_VAL)
+		msm_pcie_write_reg_field(dev->phy, PCIE20_PARF_PCS_DEEMPH3,
+			BIT(5)|BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0),
+				TX_DEEMPH_GEN1_VAL);
+
+	/* Program Rx_Eq */
+	if ((readl_relaxed(dev->phy + PCIE20_PARF_CONFIGBITS) &
+			(BIT(2)|BIT(1)|BIT(0))) != PHY_RX0_EQ_GEN1_VAL)
+		msm_pcie_write_reg_field(dev->phy, PCIE20_PARF_CONFIGBITS,
+				 BIT(2)|BIT(1)|BIT(0), PHY_RX0_EQ_GEN1_VAL);
+
+	/* Program Tx0_term_offset */
+	if ((readl_relaxed(dev->phy + PCIE20_PARF_PHY_CTRL3) &
+			(BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0))) !=
+				PHY_TX0_TERM_OFFST_VAL)
+		msm_pcie_write_reg_field(dev->phy, PCIE20_PARF_PHY_CTRL3,
+			 BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0),
+				PHY_TX0_TERM_OFFST_VAL);
+
+	/* Program REF_CLK source */
+	msm_pcie_write_reg_field(dev->phy, PCIE20_PARF_PHY_REFCLK_CTRL2, BIT(1),
+		(dev->ext_ref_clk) ? 1 : 0);
+	/* disable Tx2Rx Loopback */
+	if (readl_relaxed(dev->phy + PCIE20_PARF_PCS_CTRL) & BIT(1))
+		msm_pcie_write_reg_field(dev->phy, PCIE20_PARF_PCS_CTRL,
+								 BIT(1), 0);
+	/* De-assert Phy SW Reset */
+	pcie20_phy_reset(dev, 0);
+}
+
+static bool pcie_phy_is_ready(struct msm_pcie_dev_t *dev)
+{
+	/* read PCIE20_PARF_PHY_STTS twice and use the second value */
+	readl_relaxed(dev->phy + PCIE20_PARF_PHY_STTS);
+	return !(readl_relaxed(dev->phy + PCIE20_PARF_PHY_STTS) & BIT(0));
+}
+#else
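+/*
+ * Select PHY testbus sources through the TEST_CONTROL registers, then log
+ * the DEBUG_BUS status words that come back.
+ */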
+static void pcie_phy_dump_test_cntrl(struct msm_pcie_dev_t *dev,
+					u32 cntrl4_val, u32 cntrl5_val,
+					u32 cntrl6_val, u32 cntrl7_val)
+{
+	msm_pcie_write_reg(dev->phy,
+		PCIE_N_TEST_CONTROL4(dev->rc_idx, dev->common_phy), cntrl4_val);
+	msm_pcie_write_reg(dev->phy,
+		PCIE_N_TEST_CONTROL5(dev->rc_idx, dev->common_phy), cntrl5_val);
+	msm_pcie_write_reg(dev->phy,
+		PCIE_N_TEST_CONTROL6(dev->rc_idx, dev->common_phy), cntrl6_val);
+	msm_pcie_write_reg(dev->phy,
+		PCIE_N_TEST_CONTROL7(dev->rc_idx, dev->common_phy), cntrl7_val);
+
+	PCIE_DUMP(dev,
+		"PCIe: RC%d PCIE_N_TEST_CONTROL4: 0x%x\n", dev->rc_idx,
+		readl_relaxed(dev->phy +
+			PCIE_N_TEST_CONTROL4(dev->rc_idx,
+				dev->common_phy)));
+	PCIE_DUMP(dev,
+		"PCIe: RC%d PCIE_N_TEST_CONTROL5: 0x%x\n", dev->rc_idx,
+		readl_relaxed(dev->phy +
+			PCIE_N_TEST_CONTROL5(dev->rc_idx,
+				dev->common_phy)));
+	PCIE_DUMP(dev,
+		"PCIe: RC%d PCIE_N_TEST_CONTROL6: 0x%x\n", dev->rc_idx,
+		readl_relaxed(dev->phy +
+			PCIE_N_TEST_CONTROL6(dev->rc_idx,
+				dev->common_phy)));
+	PCIE_DUMP(dev,
+		"PCIe: RC%d PCIE_N_TEST_CONTROL7: 0x%x\n", dev->rc_idx,
+		readl_relaxed(dev->phy +
+			PCIE_N_TEST_CONTROL7(dev->rc_idx,
+				dev->common_phy)));
+	PCIE_DUMP(dev,
+		"PCIe: RC%d PCIE_N_DEBUG_BUS_0_STATUS: 0x%x\n", dev->rc_idx,
+		readl_relaxed(dev->phy +
+			PCIE_N_DEBUG_BUS_0_STATUS(dev->rc_idx,
+				dev->common_phy)));
+	PCIE_DUMP(dev,
+		"PCIe: RC%d PCIE_N_DEBUG_BUS_1_STATUS: 0x%x\n", dev->rc_idx,
+		readl_relaxed(dev->phy +
+			PCIE_N_DEBUG_BUS_1_STATUS(dev->rc_idx,
+				dev->common_phy)));
+	PCIE_DUMP(dev,
+		"PCIe: RC%d PCIE_N_DEBUG_BUS_2_STATUS: 0x%x\n", dev->rc_idx,
+		readl_relaxed(dev->phy +
+			PCIE_N_DEBUG_BUS_2_STATUS(dev->rc_idx,
+				dev->common_phy)));
+	PCIE_DUMP(dev,
+		"PCIe: RC%d PCIE_N_DEBUG_BUS_3_STATUS: 0x%x\n\n", dev->rc_idx,
+		readl_relaxed(dev->phy +
+			PCIE_N_DEBUG_BUS_3_STATUS(dev->rc_idx,
+				dev->common_phy)));
+}
+
+static void pcie_phy_dump(struct msm_pcie_dev_t *dev)
+{
+	int i, size;
+	u32 write_val;
+
+	if (dev->phy_ver >= 0x20) {
+		PCIE_DUMP(dev, "PCIe: RC%d PHY dump is not supported\n",
+			dev->rc_idx);
+		return;
+	}
+
+	PCIE_DUMP(dev, "PCIe: RC%d PHY testbus\n", dev->rc_idx);
+
+	pcie_phy_dump_test_cntrl(dev, 0x18, 0x19, 0x1A, 0x1B);
+	pcie_phy_dump_test_cntrl(dev, 0x1C, 0x1D, 0x1E, 0x1F);
+	pcie_phy_dump_test_cntrl(dev, 0x20, 0x21, 0x22, 0x23);
+
+	for (i = 0; i < 3; i++) {
+		write_val = 0x1 + i;
+		msm_pcie_write_reg(dev->phy,
+			QSERDES_TX_N_DEBUG_BUS_SEL(dev->rc_idx,
+				dev->common_phy), write_val);
+		PCIE_DUMP(dev,
+			"PCIe: RC%d QSERDES_TX_N_DEBUG_BUS_SEL: 0x%x\n",
+			dev->rc_idx,
+			readl_relaxed(dev->phy +
+				QSERDES_TX_N_DEBUG_BUS_SEL(dev->rc_idx,
+					dev->common_phy)));
+
+		pcie_phy_dump_test_cntrl(dev, 0x30, 0x31, 0x32, 0x33);
+	}
+
+	pcie_phy_dump_test_cntrl(dev, 0, 0, 0, 0);
+
+	if (dev->phy_ver >= 0x10 && dev->phy_ver < 0x20) {
+		pcie_phy_dump_test_cntrl(dev, 0x01, 0x02, 0x03, 0x0A);
+		pcie_phy_dump_test_cntrl(dev, 0x0E, 0x0F, 0x12, 0x13);
+		pcie_phy_dump_test_cntrl(dev, 0, 0, 0, 0);
+
+		for (i = 0; i < 8; i += 4) {
+			write_val = 0x1 + i;
+			msm_pcie_write_reg(dev->phy,
+				PCIE_MISC_N_DEBUG_BUS_BYTE0_INDEX(dev->rc_idx,
+					dev->common_phy), write_val);
+			msm_pcie_write_reg(dev->phy,
+				PCIE_MISC_N_DEBUG_BUS_BYTE1_INDEX(dev->rc_idx,
+					dev->common_phy), write_val + 1);
+			msm_pcie_write_reg(dev->phy,
+				PCIE_MISC_N_DEBUG_BUS_BYTE2_INDEX(dev->rc_idx,
+					dev->common_phy), write_val + 2);
+			msm_pcie_write_reg(dev->phy,
+				PCIE_MISC_N_DEBUG_BUS_BYTE3_INDEX(dev->rc_idx,
+					dev->common_phy), write_val + 3);
+
+			PCIE_DUMP(dev,
+				"PCIe: RC%d to PCIE_MISC_N_DEBUG_BUS_BYTE0_INDEX: 0x%x\n",
+				dev->rc_idx,
+				readl_relaxed(dev->phy +
+					PCIE_MISC_N_DEBUG_BUS_BYTE0_INDEX(
+						dev->rc_idx, dev->common_phy)));
+			PCIE_DUMP(dev,
+				"PCIe: RC%d to PCIE_MISC_N_DEBUG_BUS_BYTE1_INDEX: 0x%x\n",
+				dev->rc_idx,
+				readl_relaxed(dev->phy +
+					PCIE_MISC_N_DEBUG_BUS_BYTE1_INDEX(
+						dev->rc_idx, dev->common_phy)));
+			PCIE_DUMP(dev,
+				"PCIe: RC%d to PCIE_MISC_N_DEBUG_BUS_BYTE2_INDEX: 0x%x\n",
+				dev->rc_idx,
+				readl_relaxed(dev->phy +
+					PCIE_MISC_N_DEBUG_BUS_BYTE2_INDEX(
+						dev->rc_idx, dev->common_phy)));
+			PCIE_DUMP(dev,
+				"PCIe: RC%d to PCIE_MISC_N_DEBUG_BUS_BYTE3_INDEX: 0x%x\n",
+				dev->rc_idx,
+				readl_relaxed(dev->phy +
+					PCIE_MISC_N_DEBUG_BUS_BYTE3_INDEX(
+						dev->rc_idx, dev->common_phy)));
+			PCIE_DUMP(dev,
+				"PCIe: RC%d PCIE_MISC_N_DEBUG_BUS_0_STATUS: 0x%x\n",
+				dev->rc_idx,
+				readl_relaxed(dev->phy +
+					PCIE_MISC_N_DEBUG_BUS_0_STATUS(
+						dev->rc_idx, dev->common_phy)));
+			PCIE_DUMP(dev,
+				"PCIe: RC%d PCIE_MISC_N_DEBUG_BUS_1_STATUS: 0x%x\n",
+				dev->rc_idx,
+				readl_relaxed(dev->phy +
+					PCIE_MISC_N_DEBUG_BUS_1_STATUS(
+						dev->rc_idx, dev->common_phy)));
+			PCIE_DUMP(dev,
+				"PCIe: RC%d PCIE_MISC_N_DEBUG_BUS_2_STATUS: 0x%x\n",
+				dev->rc_idx,
+				readl_relaxed(dev->phy +
+					PCIE_MISC_N_DEBUG_BUS_2_STATUS(
+						dev->rc_idx, dev->common_phy)));
+			PCIE_DUMP(dev,
+				"PCIe: RC%d PCIE_MISC_N_DEBUG_BUS_3_STATUS: 0x%x\n",
+				dev->rc_idx,
+				readl_relaxed(dev->phy +
+					PCIE_MISC_N_DEBUG_BUS_3_STATUS(
+						dev->rc_idx, dev->common_phy)));
+		}
+
+		msm_pcie_write_reg(dev->phy,
+			PCIE_MISC_N_DEBUG_BUS_BYTE0_INDEX(
+				dev->rc_idx, dev->common_phy), 0);
+		msm_pcie_write_reg(dev->phy,
+			PCIE_MISC_N_DEBUG_BUS_BYTE1_INDEX(
+				dev->rc_idx, dev->common_phy), 0);
+		msm_pcie_write_reg(dev->phy,
+			PCIE_MISC_N_DEBUG_BUS_BYTE2_INDEX(
+				dev->rc_idx, dev->common_phy), 0);
+		msm_pcie_write_reg(dev->phy,
+			PCIE_MISC_N_DEBUG_BUS_BYTE3_INDEX(
+				dev->rc_idx, dev->common_phy), 0);
+	}
+
+	for (i = 0; i < 2; i++) {
+		write_val = 0x2 + i;
+
+		msm_pcie_write_reg(dev->phy, QSERDES_COM_DEBUG_BUS_SEL,
+			write_val);
+
+		PCIE_DUMP(dev,
+			"PCIe: RC%d to QSERDES_COM_DEBUG_BUS_SEL: 0x%x\n",
+			dev->rc_idx,
+			readl_relaxed(dev->phy + QSERDES_COM_DEBUG_BUS_SEL));
+		PCIE_DUMP(dev,
+			"PCIe: RC%d QSERDES_COM_DEBUG_BUS0: 0x%x\n",
+			dev->rc_idx,
+			readl_relaxed(dev->phy + QSERDES_COM_DEBUG_BUS0));
+		PCIE_DUMP(dev,
+			"PCIe: RC%d QSERDES_COM_DEBUG_BUS1: 0x%x\n",
+			dev->rc_idx,
+			readl_relaxed(dev->phy + QSERDES_COM_DEBUG_BUS1));
+		PCIE_DUMP(dev,
+			"PCIe: RC%d QSERDES_COM_DEBUG_BUS2: 0x%x\n",
+			dev->rc_idx,
+			readl_relaxed(dev->phy + QSERDES_COM_DEBUG_BUS2));
+		PCIE_DUMP(dev,
+			"PCIe: RC%d QSERDES_COM_DEBUG_BUS3: 0x%x\n\n",
+			dev->rc_idx,
+			readl_relaxed(dev->phy + QSERDES_COM_DEBUG_BUS3));
+	}
+
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_DEBUG_BUS_SEL, 0);
+
+	if (dev->common_phy) {
+		msm_pcie_write_reg(dev->phy, PCIE_COM_DEBUG_BUS_BYTE0_INDEX,
+			0x01);
+		msm_pcie_write_reg(dev->phy, PCIE_COM_DEBUG_BUS_BYTE1_INDEX,
+			0x02);
+		msm_pcie_write_reg(dev->phy, PCIE_COM_DEBUG_BUS_BYTE2_INDEX,
+			0x03);
+		msm_pcie_write_reg(dev->phy, PCIE_COM_DEBUG_BUS_BYTE3_INDEX,
+			0x04);
+
+		PCIE_DUMP(dev,
+			"PCIe: RC%d to PCIE_COM_DEBUG_BUS_BYTE0_INDEX: 0x%x\n",
+			dev->rc_idx,
+			readl_relaxed(dev->phy +
+				PCIE_COM_DEBUG_BUS_BYTE0_INDEX));
+		PCIE_DUMP(dev,
+			"PCIe: RC%d to PCIE_COM_DEBUG_BUS_BYTE1_INDEX: 0x%x\n",
+			dev->rc_idx,
+			readl_relaxed(dev->phy +
+				PCIE_COM_DEBUG_BUS_BYTE1_INDEX));
+		PCIE_DUMP(dev,
+			"PCIe: RC%d to PCIE_COM_DEBUG_BUS_BYTE2_INDEX: 0x%x\n",
+			dev->rc_idx,
+			readl_relaxed(dev->phy +
+				PCIE_COM_DEBUG_BUS_BYTE2_INDEX));
+		PCIE_DUMP(dev,
+			"PCIe: RC%d to PCIE_COM_DEBUG_BUS_BYTE3_INDEX: 0x%x\n",
+			dev->rc_idx,
+			readl_relaxed(dev->phy +
+				PCIE_COM_DEBUG_BUS_BYTE3_INDEX));
+		PCIE_DUMP(dev,
+			"PCIe: RC%d PCIE_COM_DEBUG_BUS_0_STATUS: 0x%x\n",
+			dev->rc_idx,
+			readl_relaxed(dev->phy +
+				PCIE_COM_DEBUG_BUS_0_STATUS));
+		PCIE_DUMP(dev,
+			"PCIe: RC%d PCIE_COM_DEBUG_BUS_1_STATUS: 0x%x\n",
+			dev->rc_idx,
+			readl_relaxed(dev->phy +
+				PCIE_COM_DEBUG_BUS_1_STATUS));
+		PCIE_DUMP(dev,
+			"PCIe: RC%d PCIE_COM_DEBUG_BUS_2_STATUS: 0x%x\n",
+			dev->rc_idx,
+			readl_relaxed(dev->phy +
+				PCIE_COM_DEBUG_BUS_2_STATUS));
+		PCIE_DUMP(dev,
+			"PCIe: RC%d PCIE_COM_DEBUG_BUS_3_STATUS: 0x%x\n",
+			dev->rc_idx,
+			readl_relaxed(dev->phy +
+				PCIE_COM_DEBUG_BUS_3_STATUS));
+
+		msm_pcie_write_reg(dev->phy, PCIE_COM_DEBUG_BUS_BYTE0_INDEX,
+			0x05);
+
+		PCIE_DUMP(dev,
+			"PCIe: RC%d to PCIE_COM_DEBUG_BUS_BYTE0_INDEX: 0x%x\n",
+			dev->rc_idx,
+			readl_relaxed(dev->phy +
+				PCIE_COM_DEBUG_BUS_BYTE0_INDEX));
+		PCIE_DUMP(dev,
+			"PCIe: RC%d PCIE_COM_DEBUG_BUS_0_STATUS: 0x%x\n\n",
+			dev->rc_idx,
+			readl_relaxed(dev->phy +
+				PCIE_COM_DEBUG_BUS_0_STATUS));
+	}
+
+	size = resource_size(dev->res[MSM_PCIE_RES_PHY].resource);
+	for (i = 0; i < size; i += 32) {
+		PCIE_DUMP(dev,
+			"PCIe PHY of RC%d: 0x%04x %08x %08x %08x %08x %08x %08x %08x %08x\n",
+			dev->rc_idx, i,
+			readl_relaxed(dev->phy + i),
+			readl_relaxed(dev->phy + (i + 4)),
+			readl_relaxed(dev->phy + (i + 8)),
+			readl_relaxed(dev->phy + (i + 12)),
+			readl_relaxed(dev->phy + (i + 16)),
+			readl_relaxed(dev->phy + (i + 20)),
+			readl_relaxed(dev->phy + (i + 24)),
+			readl_relaxed(dev->phy + (i + 28)));
+	}
+}
+
+#ifdef CONFIG_ARCH_MDMCALIFORNIUM
+static void pcie_phy_init(struct msm_pcie_dev_t *dev)
+{
+	u8 common_phy;
+
+	PCIE_DBG(dev,
+		"RC%d: Initializing MDM 14nm QMP phy - 19.2MHz with Common Mode Clock (SSC ON)\n",
+		dev->rc_idx);
+
+	if (dev->common_phy)
+		common_phy = 1;
+	else
+		common_phy = 0;
+
+	msm_pcie_write_reg(dev->phy,
+		PCIE_N_SW_RESET(dev->rc_idx, common_phy),
+		0x01);
+	msm_pcie_write_reg(dev->phy,
+		PCIE_N_POWER_DOWN_CONTROL(dev->rc_idx, common_phy),
+		0x03);
+
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x18);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_CLK_ENABLE1, 0x10);
+
+	msm_pcie_write_reg(dev->phy,
+			QSERDES_TX_N_LANE_MODE(dev->rc_idx, common_phy), 0x06);
+
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_LOCK_CMP_EN, 0x01);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_VCO_TUNE_MAP, 0x00);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_VCO_TUNE_TIMER1, 0xFF);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_VCO_TUNE_TIMER2, 0x1F);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_BG_TRIM, 0x0F);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_PLL_IVCO, 0x0F);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_HSCLK_SEL, 0x00);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_SVS_MODE_CLK_SEL, 0x01);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_CORE_CLK_EN, 0x20);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_CORECLK_DIV, 0x0A);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_BG_TIMER, 0x09);
+
+	if (dev->tcsr) {
+		PCIE_DBG(dev, "RC%d: TCSR PHY clock scheme is 0x%x\n",
+			dev->rc_idx, readl_relaxed(dev->tcsr));
+
+		if (readl_relaxed(dev->tcsr) & (BIT(1) | BIT(0)))
+			msm_pcie_write_reg(dev->phy,
+					QSERDES_COM_SYSCLK_EN_SEL, 0x0A);
+		else
+			msm_pcie_write_reg(dev->phy,
+					QSERDES_COM_SYSCLK_EN_SEL, 0x04);
+	}
+
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_DEC_START_MODE0, 0x82);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_DIV_FRAC_START3_MODE0, 0x03);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_DIV_FRAC_START2_MODE0, 0x55);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_DIV_FRAC_START1_MODE0, 0x55);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_LOCK_CMP3_MODE0, 0x00);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_LOCK_CMP2_MODE0, 0x0D);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_LOCK_CMP1_MODE0, 0x04);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_CLK_SELECT, 0x33);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_SYS_CLK_CTRL, 0x02);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_SYSCLK_BUF_ENABLE, 0x1F);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_CP_CTRL_MODE0, 0x0B);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_PLL_RCTRL_MODE0, 0x16);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_PLL_CCTRL_MODE0, 0x28);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x00);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x80);
+
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_EN_CENTER, 0x01);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_PER1, 0x31);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_PER2, 0x01);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_ADJ_PER1, 0x02);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_ADJ_PER2, 0x00);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_STEP_SIZE1, 0x2f);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_STEP_SIZE2, 0x19);
+
+	msm_pcie_write_reg(dev->phy,
+		QSERDES_TX_N_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN(dev->rc_idx,
+		common_phy), 0x45);
+
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_CMN_CONFIG, 0x06);
+
+	msm_pcie_write_reg(dev->phy,
+		QSERDES_TX_N_RES_CODE_LANE_OFFSET(dev->rc_idx, common_phy),
+		0x02);
+	msm_pcie_write_reg(dev->phy,
+		QSERDES_TX_N_RCV_DETECT_LVL_2(dev->rc_idx, common_phy),
+		0x12);
+
+	msm_pcie_write_reg(dev->phy,
+		QSERDES_RX_N_SIGDET_ENABLES(dev->rc_idx, common_phy),
+		0x1C);
+	msm_pcie_write_reg(dev->phy,
+		QSERDES_RX_N_SIGDET_DEGLITCH_CNTRL(dev->rc_idx, common_phy),
+		0x14);
+	msm_pcie_write_reg(dev->phy,
+		QSERDES_RX_N_RX_EQU_ADAPTOR_CNTRL2(dev->rc_idx, common_phy),
+		0x01);
+	msm_pcie_write_reg(dev->phy,
+		QSERDES_RX_N_RX_EQU_ADAPTOR_CNTRL3(dev->rc_idx, common_phy),
+		0x00);
+	msm_pcie_write_reg(dev->phy,
+		QSERDES_RX_N_RX_EQU_ADAPTOR_CNTRL4(dev->rc_idx, common_phy),
+		0xDB);
+	msm_pcie_write_reg(dev->phy,
+		QSERDES_RX_N_UCDR_SO_SATURATION_AND_ENABLE(dev->rc_idx,
+		common_phy),
+		0x4B);
+	msm_pcie_write_reg(dev->phy,
+		QSERDES_RX_N_UCDR_SO_GAIN(dev->rc_idx, common_phy),
+		0x04);
+	msm_pcie_write_reg(dev->phy,
+		QSERDES_RX_N_UCDR_SO_GAIN_HALF(dev->rc_idx, common_phy),
+		0x04);
+
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_CLK_EP_DIV, 0x19);
+
+	msm_pcie_write_reg(dev->phy,
+		PCIE_N_ENDPOINT_REFCLK_DRIVE(dev->rc_idx, common_phy),
+		0x04);
+	msm_pcie_write_reg(dev->phy,
+		PCIE_N_OSC_DTCT_ACTIONS(dev->rc_idx, common_phy),
+		0x00);
+	msm_pcie_write_reg(dev->phy,
+		PCIE_N_PWRUP_RESET_DLY_TIME_AUXCLK(dev->rc_idx, common_phy),
+		0x40);
+	msm_pcie_write_reg(dev->phy,
+		PCIE_N_L1SS_WAKEUP_DLY_TIME_AUXCLK_MSB(dev->rc_idx, common_phy),
+		0x00);
+	msm_pcie_write_reg(dev->phy,
+		PCIE_N_L1SS_WAKEUP_DLY_TIME_AUXCLK_LSB(dev->rc_idx, common_phy),
+		0x40);
+	msm_pcie_write_reg(dev->phy,
+		PCIE_N_LP_WAKEUP_DLY_TIME_AUXCLK_MSB(dev->rc_idx, common_phy),
+		0x00);
+	msm_pcie_write_reg(dev->phy,
+		PCIE_N_LP_WAKEUP_DLY_TIME_AUXCLK(dev->rc_idx, common_phy),
+		0x40);
+	msm_pcie_write_reg(dev->phy,
+		PCIE_N_PLL_LOCK_CHK_DLY_TIME(dev->rc_idx, common_phy),
+		0x73);
+	msm_pcie_write_reg(dev->phy,
+		QSERDES_RX_N_SIGDET_LVL(dev->rc_idx, common_phy),
+		0x99);
+	msm_pcie_write_reg(dev->phy,
+		PCIE_N_TXDEEMPH_M6DB_V0(dev->rc_idx, common_phy),
+		0x15);
+	msm_pcie_write_reg(dev->phy,
+		PCIE_N_TXDEEMPH_M3P5DB_V0(dev->rc_idx, common_phy),
+		0x0E);
+
+	msm_pcie_write_reg(dev->phy,
+		PCIE_N_SIGDET_CNTRL(dev->rc_idx, common_phy),
+		0x07);
+
+	msm_pcie_write_reg(dev->phy,
+		PCIE_N_SW_RESET(dev->rc_idx, common_phy),
+		0x00);
+	msm_pcie_write_reg(dev->phy,
+		PCIE_N_START_CONTROL(dev->rc_idx, common_phy),
+		0x03);
+}
+
+static void pcie_pcs_port_phy_init(struct msm_pcie_dev_t *dev)
+{
+}
+
+static bool pcie_phy_is_ready(struct msm_pcie_dev_t *dev)
+{
+	return !(readl_relaxed(dev->phy +
+		PCIE_N_PCS_STATUS(dev->rc_idx, dev->common_phy)) & BIT(6));
+}
+#else
+static void pcie_phy_init(struct msm_pcie_dev_t *dev)
+{
+	int i;
+	struct msm_pcie_phy_info_t *phy_seq;
+
+	PCIE_DBG(dev,
+		"RC%d: Initializing 14nm QMP phy - 19.2MHz with Common Mode Clock (SSC ON)\n",
+		dev->rc_idx);
+
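+	/* prefer a DT-supplied PHY init sequence over the defaults below */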
+	if (dev->phy_sequence) {
+		i =  dev->phy_len;
+		phy_seq = dev->phy_sequence;
+		while (i--) {
+			msm_pcie_write_reg(dev->phy,
+				phy_seq->offset,
+				phy_seq->val);
+			if (phy_seq->delay)
+				usleep_range(phy_seq->delay,
+					phy_seq->delay + 1);
+			phy_seq++;
+		}
+		return;
+	}
+
+	if (dev->common_phy)
+		msm_pcie_write_reg(dev->phy, PCIE_COM_POWER_DOWN_CONTROL, 0x01);
+
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x1C);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_CLK_ENABLE1, 0x10);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_CLK_SELECT, 0x33);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_CMN_CONFIG, 0x06);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_LOCK_CMP_EN, 0x42);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_VCO_TUNE_MAP, 0x00);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_VCO_TUNE_TIMER1, 0xFF);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_VCO_TUNE_TIMER2, 0x1F);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_HSCLK_SEL, 0x01);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_SVS_MODE_CLK_SEL, 0x01);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_CORE_CLK_EN, 0x00);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_CORECLK_DIV, 0x0A);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_BG_TIMER, 0x09);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_DEC_START_MODE0, 0x82);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_DIV_FRAC_START3_MODE0, 0x03);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_DIV_FRAC_START2_MODE0, 0x55);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_DIV_FRAC_START1_MODE0, 0x55);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_LOCK_CMP3_MODE0, 0x00);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_LOCK_CMP2_MODE0, 0x1A);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_LOCK_CMP1_MODE0, 0x0A);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_CLK_SELECT, 0x33);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_SYS_CLK_CTRL, 0x02);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_SYSCLK_BUF_ENABLE, 0x1F);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_SYSCLK_EN_SEL, 0x04);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_CP_CTRL_MODE0, 0x0B);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_PLL_RCTRL_MODE0, 0x16);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_PLL_CCTRL_MODE0, 0x28);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x00);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x80);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_EN_CENTER, 0x01);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_PER1, 0x31);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_PER2, 0x01);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_ADJ_PER1, 0x02);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_ADJ_PER2, 0x00);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_STEP_SIZE1, 0x2f);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_SSC_STEP_SIZE2, 0x19);
+
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_RESCODE_DIV_NUM, 0x15);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_BG_TRIM, 0x0F);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_PLL_IVCO, 0x0F);
+
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_CLK_EP_DIV, 0x19);
+	msm_pcie_write_reg(dev->phy, QSERDES_COM_CLK_ENABLE1, 0x10);
+
+	if (dev->phy_ver == 0x3) {
+		msm_pcie_write_reg(dev->phy, QSERDES_COM_HSCLK_SEL, 0x00);
+		msm_pcie_write_reg(dev->phy, QSERDES_COM_RESCODE_DIV_NUM, 0x40);
+	}
+
+	if (dev->common_phy) {
+		msm_pcie_write_reg(dev->phy, PCIE_COM_SW_RESET, 0x00);
+		msm_pcie_write_reg(dev->phy, PCIE_COM_START_CONTROL, 0x03);
+	}
+}
+
+static void pcie_pcs_port_phy_init(struct msm_pcie_dev_t *dev)
+{
+	int i;
+	struct msm_pcie_phy_info_t *phy_seq;
+	u8 common_phy;
+
+	if (dev->phy_ver >= 0x20)
+		return;
+
+	PCIE_DBG(dev, "RC%d: Initializing PCIe PHY Port\n", dev->rc_idx);
+
+	if (dev->common_phy)
+		common_phy = 1;
+	else
+		common_phy = 0;
+
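+	/* a DT-supplied port sequence likewise overrides the defaults below */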
+	if (dev->port_phy_sequence) {
+		i =  dev->port_phy_len;
+		phy_seq = dev->port_phy_sequence;
+		while (i--) {
+			msm_pcie_write_reg(dev->phy,
+				phy_seq->offset,
+				phy_seq->val);
+			if (phy_seq->delay)
+				usleep_range(phy_seq->delay,
+					phy_seq->delay + 1);
+			phy_seq++;
+		}
+		return;
+	}
+
+	msm_pcie_write_reg(dev->phy,
+		QSERDES_TX_N_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN(dev->rc_idx,
+		common_phy), 0x45);
+	msm_pcie_write_reg(dev->phy,
+		QSERDES_TX_N_LANE_MODE(dev->rc_idx, common_phy),
+		0x06);
+
+	msm_pcie_write_reg(dev->phy,
+		QSERDES_RX_N_SIGDET_ENABLES(dev->rc_idx, common_phy),
+		0x1C);
+	msm_pcie_write_reg(dev->phy,
+		QSERDES_RX_N_SIGDET_LVL(dev->rc_idx, common_phy),
+		0x17);
+	msm_pcie_write_reg(dev->phy,
+		QSERDES_RX_N_RX_EQU_ADAPTOR_CNTRL2(dev->rc_idx, common_phy),
+		0x01);
+	msm_pcie_write_reg(dev->phy,
+		QSERDES_RX_N_RX_EQU_ADAPTOR_CNTRL3(dev->rc_idx, common_phy),
+		0x00);
+	msm_pcie_write_reg(dev->phy,
+		QSERDES_RX_N_RX_EQU_ADAPTOR_CNTRL4(dev->rc_idx, common_phy),
+		0xDB);
+	msm_pcie_write_reg(dev->phy,
+		QSERDES_RX_N_RX_BAND(dev->rc_idx, common_phy),
+		0x18);
+	msm_pcie_write_reg(dev->phy,
+		QSERDES_RX_N_UCDR_SO_GAIN(dev->rc_idx, common_phy),
+		0x04);
+	msm_pcie_write_reg(dev->phy,
+		QSERDES_RX_N_UCDR_SO_GAIN_HALF(dev->rc_idx, common_phy),
+		0x04);
+	msm_pcie_write_reg(dev->phy,
+		PCIE_N_RX_IDLE_DTCT_CNTRL(dev->rc_idx, common_phy),
+		0x4C);
+	msm_pcie_write_reg(dev->phy,
+		PCIE_N_PWRUP_RESET_DLY_TIME_AUXCLK(dev->rc_idx, common_phy),
+		0x00);
+	msm_pcie_write_reg(dev->phy,
+		PCIE_N_LP_WAKEUP_DLY_TIME_AUXCLK(dev->rc_idx, common_phy),
+		0x01);
+	msm_pcie_write_reg(dev->phy,
+		PCIE_N_PLL_LOCK_CHK_DLY_TIME(dev->rc_idx, common_phy),
+		0x05);
+	msm_pcie_write_reg(dev->phy,
+		QSERDES_RX_N_UCDR_SO_SATURATION_AND_ENABLE(dev->rc_idx,
+		common_phy), 0x4B);
+	msm_pcie_write_reg(dev->phy,
+		QSERDES_RX_N_SIGDET_DEGLITCH_CNTRL(dev->rc_idx, common_phy),
+		0x14);
+
+	msm_pcie_write_reg(dev->phy,
+		PCIE_N_ENDPOINT_REFCLK_DRIVE(dev->rc_idx, common_phy),
+		0x05);
+	msm_pcie_write_reg(dev->phy,
+		PCIE_N_POWER_DOWN_CONTROL(dev->rc_idx, common_phy),
+		0x02);
+	msm_pcie_write_reg(dev->phy,
+		PCIE_N_POWER_STATE_CONFIG4(dev->rc_idx, common_phy),
+		0x00);
+	msm_pcie_write_reg(dev->phy,
+		PCIE_N_POWER_STATE_CONFIG1(dev->rc_idx, common_phy),
+		0xA3);
+
+	if (dev->phy_ver == 0x3) {
+		msm_pcie_write_reg(dev->phy,
+			QSERDES_RX_N_SIGDET_LVL(dev->rc_idx, common_phy),
+			0x19);
+
+		msm_pcie_write_reg(dev->phy,
+			PCIE_N_TXDEEMPH_M3P5DB_V0(dev->rc_idx, common_phy),
+			0x0E);
+	}
+
+	msm_pcie_write_reg(dev->phy,
+		PCIE_N_POWER_DOWN_CONTROL(dev->rc_idx, common_phy),
+		0x03);
+	usleep_range(POWER_DOWN_DELAY_US_MIN, POWER_DOWN_DELAY_US_MAX);
+
+	msm_pcie_write_reg(dev->phy,
+		PCIE_N_SW_RESET(dev->rc_idx, common_phy),
+		0x00);
+	msm_pcie_write_reg(dev->phy,
+		PCIE_N_START_CONTROL(dev->rc_idx, common_phy),
+		0x0A);
+}
+
+static bool pcie_phy_is_ready(struct msm_pcie_dev_t *dev)
+{
+	if (dev->phy_ver >= 0x20)
+		return !(readl_relaxed(dev->phy +
+			PCIE_N_PCS_STATUS(dev->rc_idx, dev->common_phy)) &
+				BIT(6));
+
+	return readl_relaxed(dev->phy + PCIE_COM_PCS_READY_STATUS) & 0x1;
+}
+#endif
+#endif
+
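+/*
+ * Ask the secure world (via an SCM call) to restore this device's secure
+ * configuration, e.g. after it was lost across a power collapse.
+ */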
+static int msm_pcie_restore_sec_config(struct msm_pcie_dev_t *dev)
+{
+	int ret, scm_ret;
+
+	if (!dev) {
+		pr_err("PCIe: the input pcie dev is NULL.\n");
+		return -ENODEV;
+	}
+
+	ret = scm_restore_sec_cfg(dev->scm_dev_id, 0, &scm_ret);
+	if (ret || scm_ret) {
+		PCIE_ERR(dev,
+			"PCIe: RC%d failed(%d) to restore sec config, scm_ret=%d\n",
+			dev->rc_idx, ret, scm_ret);
+		return ret ? ret : -EINVAL;
+	}
+
+	return 0;
+}
+
+static inline int msm_pcie_check_align(struct msm_pcie_dev_t *dev,
+						u32 offset)
+{
+	if (offset % 4) {
+		PCIE_ERR(dev,
+			"PCIe: RC%d: offset 0x%x is not correctly aligned\n",
+			dev->rc_idx, offset);
+		return MSM_PCIE_ERROR;
+	}
+
+	return 0;
+}
+
+static bool msm_pcie_confirm_linkup(struct msm_pcie_dev_t *dev,
+						bool check_sw_stts,
+						bool check_ep,
+						void __iomem *ep_conf)
+{
+	u32 val;
+
+	if (check_sw_stts && (dev->link_status != MSM_PCIE_LINK_ENABLED)) {
+		PCIE_DBG(dev, "PCIe: The link of RC %d is not enabled.\n",
+			dev->rc_idx);
+		return false;
+	}
+
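+	/* 0x80 BIT(29) = Link Status bit 13: DLL Link Active */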
+	if (!(readl_relaxed(dev->dm_core + 0x80) & BIT(29))) {
+		PCIE_DBG(dev, "PCIe: The link of RC %d is not up.\n",
+			dev->rc_idx);
+		return false;
+	}
+
+	val = readl_relaxed(dev->dm_core);
+	PCIE_DBG(dev, "PCIe: device ID and vender ID of RC %d are 0x%x.\n",
+		dev->rc_idx, val);
+	if (val == PCIE_LINK_DOWN) {
+		PCIE_ERR(dev,
+			"PCIe: The link of RC %d is not really up; device ID and vender ID of RC %d are 0x%x.\n",
+			dev->rc_idx, dev->rc_idx, val);
+		return false;
+	}
+
+	if (check_ep) {
+		val = readl_relaxed(ep_conf);
+		PCIE_DBG(dev,
+			"PCIe: device ID and vender ID of EP of RC %d are 0x%x.\n",
+			dev->rc_idx, val);
+		if (val == PCIE_LINK_DOWN) {
+			PCIE_ERR(dev,
+				"PCIe: The link of RC %d is not really up; device ID and vender ID of EP of RC %d are 0x%x.\n",
+				dev->rc_idx, dev->rc_idx, val);
+			return false;
+		}
+	}
+
+	return true;
+}
+
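+/*
+ * Restore RC or EP configuration space from the driver's shadow copy:
+ * every shadow word that is not PCIE_CLEAR is written back, and the PCI
+ * core state of each endpoint is re-saved afterwards.
+ */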
+static void msm_pcie_cfg_recover(struct msm_pcie_dev_t *dev, bool rc)
+{
+	int i, j;
+	u32 val = 0;
+	u32 *shadow;
+	void __iomem *cfg = dev->conf;
+
+	for (i = 0; i < MAX_DEVICE_NUM; i++) {
+		if (!rc && !dev->pcidev_table[i].bdf)
+			break;
+		if (rc) {
+			cfg = dev->dm_core;
+			shadow = dev->rc_shadow;
+		} else {
+			if (!msm_pcie_confirm_linkup(dev, false, true,
+				dev->pcidev_table[i].conf_base))
+				continue;
+
+			shadow = dev->ep_shadow[i];
+			PCIE_DBG(dev,
+				"PCIe Device: %02x:%02x.%01x\n",
+				dev->pcidev_table[i].bdf >> 24,
+				dev->pcidev_table[i].bdf >> 19 & 0x1f,
+				dev->pcidev_table[i].bdf >> 16 & 0x07);
+		}
+		for (j = PCIE_CONF_SPACE_DW - 1; j >= 0; j--) {
+			val = shadow[j];
+			if (val != PCIE_CLEAR) {
+				PCIE_DBG3(dev,
+					"PCIe: before recovery:cfg 0x%x:0x%x\n",
+					j * 4, readl_relaxed(cfg + j * 4));
+				PCIE_DBG3(dev,
+					"PCIe: shadow_dw[%d]:cfg 0x%x:0x%x\n",
+					j, j * 4, val);
+				writel_relaxed(val, cfg + j * 4);
+				/* ensure changes propagate to the hardware */
+				wmb();
+				PCIE_DBG3(dev,
+					"PCIe: after recovery:cfg 0x%x:0x%x\n\n",
+					j * 4, readl_relaxed(cfg + j * 4));
+			}
+		}
+		if (rc)
+			break;
+
+		pci_save_state(dev->pcidev_table[i].dev);
+		cfg += SZ_4K;
+	}
+}
+
+static void msm_pcie_write_mask(void __iomem *addr,
+				uint32_t clear_mask, uint32_t set_mask)
+{
+	uint32_t val;
+
+	val = (readl_relaxed(addr) & ~clear_mask) | set_mask;
+	writel_relaxed(val, addr);
+	wmb();  /* ensure data is written to hardware register */
+}
+
+static void pcie_parf_dump(struct msm_pcie_dev_t *dev)
+{
+	int i, size;
+	u32 original;
+
+	PCIE_DUMP(dev, "PCIe: RC%d PARF testbus\n", dev->rc_idx);
+
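+	/* sweep the testbus select field, SYS_CTRL[23:16], from 0x01 to 0x1A */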
+	original = readl_relaxed(dev->parf + PCIE20_PARF_SYS_CTRL);
+	for (i = 1; i <= 0x1A; i++) {
+		msm_pcie_write_mask(dev->parf + PCIE20_PARF_SYS_CTRL,
+				0xFF0000, i << 16);
+		PCIE_DUMP(dev,
+			"RC%d: PARF_SYS_CTRL: 0%08x PARF_TEST_BUS: 0%08x\n",
+			dev->rc_idx,
+			readl_relaxed(dev->parf + PCIE20_PARF_SYS_CTRL),
+			readl_relaxed(dev->parf + PCIE20_PARF_TEST_BUS));
+	}
+	writel_relaxed(original, dev->parf + PCIE20_PARF_SYS_CTRL);
+
+	PCIE_DUMP(dev, "PCIe: RC%d PARF register dump\n", dev->rc_idx);
+
+	size = resource_size(dev->res[MSM_PCIE_RES_PARF].resource);
+	for (i = 0; i < size; i += 32) {
+		PCIE_DUMP(dev,
+			"RC%d: 0x%04x %08x %08x %08x %08x %08x %08x %08x %08x\n",
+			dev->rc_idx, i,
+			readl_relaxed(dev->parf + i),
+			readl_relaxed(dev->parf + (i + 4)),
+			readl_relaxed(dev->parf + (i + 8)),
+			readl_relaxed(dev->parf + (i + 12)),
+			readl_relaxed(dev->parf + (i + 16)),
+			readl_relaxed(dev->parf + (i + 20)),
+			readl_relaxed(dev->parf + (i + 24)),
+			readl_relaxed(dev->parf + (i + 28)));
+	}
+}
+
+static void msm_pcie_show_status(struct msm_pcie_dev_t *dev)
+{
+	PCIE_DBG_FS(dev, "PCIe: RC%d is %s enumerated\n",
+		dev->rc_idx, dev->enumerated ? "" : "not");
+	PCIE_DBG_FS(dev, "PCIe: link is %s\n",
+		(dev->link_status == MSM_PCIE_LINK_ENABLED)
+		? "enabled" : "disabled");
+	PCIE_DBG_FS(dev, "cfg_access is %s allowed\n",
+		dev->cfg_access ? "" : "not");
+	PCIE_DBG_FS(dev, "use_msi is %d\n",
+		dev->use_msi);
+	PCIE_DBG_FS(dev, "use_pinctrl is %d\n",
+		dev->use_pinctrl);
+	PCIE_DBG_FS(dev, "use_19p2mhz_aux_clk is %d\n",
+		dev->use_19p2mhz_aux_clk);
+	PCIE_DBG_FS(dev, "user_suspend is %d\n",
+		dev->user_suspend);
+	PCIE_DBG_FS(dev, "num_ep: %d\n",
+		dev->num_ep);
+	PCIE_DBG_FS(dev, "num_active_ep: %d\n",
+		dev->num_active_ep);
+	PCIE_DBG_FS(dev, "pending_ep_reg: %s\n",
+		dev->pending_ep_reg ? "true" : "false");
+	PCIE_DBG_FS(dev, "phy_len is %d",
+		dev->phy_len);
+	PCIE_DBG_FS(dev, "port_phy_len is %d",
+		dev->port_phy_len);
+	PCIE_DBG_FS(dev, "disable_pc is %d",
+		dev->disable_pc);
+	PCIE_DBG_FS(dev, "l0s_supported is %s supported\n",
+		dev->l0s_supported ? "" : "not");
+	PCIE_DBG_FS(dev, "l1_supported is %s supported\n",
+		dev->l1_supported ? "" : "not");
+	PCIE_DBG_FS(dev, "l1ss_supported is %s supported\n",
+		dev->l1ss_supported ? "" : "not");
+	PCIE_DBG_FS(dev, "common_clk_en is %d\n",
+		dev->common_clk_en);
+	PCIE_DBG_FS(dev, "clk_power_manage_en is %d\n",
+		dev->clk_power_manage_en);
+	PCIE_DBG_FS(dev, "aux_clk_sync is %d\n",
+		dev->aux_clk_sync);
+	PCIE_DBG_FS(dev, "AER is %s enable\n",
+		dev->aer_enable ? "" : "not");
+	PCIE_DBG_FS(dev, "ext_ref_clk is %d\n",
+		dev->ext_ref_clk);
+	PCIE_DBG_FS(dev, "ep_wakeirq is %d\n",
+		dev->ep_wakeirq);
+	PCIE_DBG_FS(dev, "phy_ver is %d\n",
+		dev->phy_ver);
+	PCIE_DBG_FS(dev, "drv_ready is %d\n",
+		dev->drv_ready);
+	PCIE_DBG_FS(dev, "linkdown_panic is %d\n",
+		dev->linkdown_panic);
+	PCIE_DBG_FS(dev, "the link is %s suspending\n",
+		dev->suspending ? "" : "not");
+	PCIE_DBG_FS(dev, "shadow is %s enabled\n",
+		dev->shadow_en ? "" : "not");
+	PCIE_DBG_FS(dev, "the power of RC is %s on\n",
+		dev->power_on ? "" : "not");
+	PCIE_DBG_FS(dev, "msi_gicm_addr: 0x%x\n",
+		dev->msi_gicm_addr);
+	PCIE_DBG_FS(dev, "msi_gicm_base: 0x%x\n",
+		dev->msi_gicm_base);
+	PCIE_DBG_FS(dev, "bus_client: %d\n",
+		dev->bus_client);
+	PCIE_DBG_FS(dev, "current short bdf: %d\n",
+		dev->current_short_bdf);
+	PCIE_DBG_FS(dev, "smmu does %s exist\n",
+		dev->smmu_exist ? "" : "not");
+	PCIE_DBG_FS(dev, "smmu_sid_base: 0x%x\n",
+		dev->smmu_sid_base);
+	PCIE_DBG_FS(dev, "n_fts: %d\n",
+		dev->n_fts);
+	PCIE_DBG_FS(dev, "common_phy: %d\n",
+		dev->common_phy);
+	PCIE_DBG_FS(dev, "ep_latency: %dms\n",
+		dev->ep_latency);
+	PCIE_DBG_FS(dev, "wr_halt_size: 0x%x\n",
+		dev->wr_halt_size);
+	PCIE_DBG_FS(dev, "cpl_timeout: 0x%x\n",
+		dev->cpl_timeout);
+	PCIE_DBG_FS(dev, "current_bdf: 0x%x\n",
+		dev->current_bdf);
+	PCIE_DBG_FS(dev, "perst_delay_us_min: %dus\n",
+		dev->perst_delay_us_min);
+	PCIE_DBG_FS(dev, "perst_delay_us_max: %dus\n",
+		dev->perst_delay_us_max);
+	PCIE_DBG_FS(dev, "tlp_rd_size: 0x%x\n",
+		dev->tlp_rd_size);
+	PCIE_DBG_FS(dev, "rc_corr_counter: %lu\n",
+		dev->rc_corr_counter);
+	PCIE_DBG_FS(dev, "rc_non_fatal_counter: %lu\n",
+		dev->rc_non_fatal_counter);
+	PCIE_DBG_FS(dev, "rc_fatal_counter: %lu\n",
+		dev->rc_fatal_counter);
+	PCIE_DBG_FS(dev, "ep_corr_counter: %lu\n",
+		dev->ep_corr_counter);
+	PCIE_DBG_FS(dev, "ep_non_fatal_counter: %lu\n",
+		dev->ep_non_fatal_counter);
+	PCIE_DBG_FS(dev, "ep_fatal_counter: %lu\n",
+		dev->ep_fatal_counter);
+	PCIE_DBG_FS(dev, "linkdown_counter: %lu\n",
+		dev->linkdown_counter);
+	PCIE_DBG_FS(dev, "wake_counter: %lu\n",
+		dev->wake_counter);
+	PCIE_DBG_FS(dev, "link_turned_on_counter: %lu\n",
+		dev->link_turned_on_counter);
+	PCIE_DBG_FS(dev, "link_turned_off_counter: %lu\n",
+		dev->link_turned_off_counter);
+}
+
+static void msm_pcie_shadow_dump(struct msm_pcie_dev_t *dev, bool rc)
+{
+	int i, j;
+	u32 val = 0;
+	u32 *shadow;
+
+	for (i = 0; i < MAX_DEVICE_NUM; i++) {
+		if (!rc && !dev->pcidev_table[i].bdf)
+			break;
+		if (rc) {
+			shadow = dev->rc_shadow;
+		} else {
+			shadow = dev->ep_shadow[i];
+			PCIE_DBG_FS(dev, "PCIe Device: %02x:%02x.%01x\n",
+				dev->pcidev_table[i].bdf >> 24,
+				dev->pcidev_table[i].bdf >> 19 & 0x1f,
+				dev->pcidev_table[i].bdf >> 16 & 0x07);
+		}
+		for (j = 0; j < PCIE_CONF_SPACE_DW; j++) {
+			val = shadow[j];
+			if (val != PCIE_CLEAR) {
+				PCIE_DBG_FS(dev,
+					"PCIe: shadow_dw[%d]:cfg 0x%x:0x%x\n",
+					j, j * 4, val);
+			}
+		}
+		if (rc)
+			break;
+	}
+}
+
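+/*
+ * Debug testcases (written to the debugfs "case" file or passed as
+ * "option" to msm_pcie_debug_info()):
+ *  0 dump status                    1 disable link
+ *  2 enable link, recover cfg       3 link bounce, recover cfg
+ *  4 dump RC/EP shadow registers    5/6 disable/enable L0s
+ *  7/8 disable/enable L1            9/10 disable/enable L1ss
+ *  11 enumerate                     12 write register field in base_sel
+ *  13 dump base_sel registers
+ */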
+static void msm_pcie_sel_debug_testcase(struct msm_pcie_dev_t *dev,
+					u32 testcase)
+{
+	int ret, i;
+	u32 base_sel_size = 0;
+	u32 val = 0;
+	u32 current_offset = 0;
+	u32 ep_l1sub_ctrl1_offset = 0;
+	u32 ep_l1sub_cap_reg1_offset = 0;
+	u32 ep_link_ctrlstts_offset = 0;
+	u32 ep_dev_ctrl2stts2_offset = 0;
+
+	if (testcase >= 5 && testcase <= 10) {
+		current_offset =
+			readl_relaxed(dev->conf + PCIE_CAP_PTR_OFFSET) & 0xff;
+
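+		/* capability header: bits [7:0] cap ID, [15:8] next pointer */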
+		while (current_offset) {
+			val = readl_relaxed(dev->conf + current_offset);
+			if ((val & 0xff) == PCIE20_CAP_ID) {
+				ep_link_ctrlstts_offset = current_offset +
+								0x10;
+				ep_dev_ctrl2stts2_offset = current_offset +
+								0x28;
+				break;
+			}
+			current_offset = (val >> 8) & 0xff;
+		}
+
+		if (!ep_link_ctrlstts_offset)
+			PCIE_DBG(dev,
+				"RC%d endpoint does not support PCIe capability registers\n",
+				dev->rc_idx);
+		else
+			PCIE_DBG(dev,
+				"RC%d: ep_link_ctrlstts_offset: 0x%x\n",
+				dev->rc_idx, ep_link_ctrlstts_offset);
+	}
+
+	switch (testcase) {
+	case 0: /* output status */
+		PCIE_DBG_FS(dev, "\n\nPCIe: Status for RC%d:\n",
+			dev->rc_idx);
+		msm_pcie_show_status(dev);
+		break;
+	case 1: /* disable link */
+		PCIE_DBG_FS(dev,
+			"\n\nPCIe: RC%d: disable link\n\n", dev->rc_idx);
+		ret = msm_pcie_pm_control(MSM_PCIE_SUSPEND, 0,
+			dev->dev, NULL,
+			MSM_PCIE_CONFIG_NO_CFG_RESTORE);
+		if (ret)
+			PCIE_DBG_FS(dev, "PCIe:%s:failed to disable link\n",
+				__func__);
+		else
+			PCIE_DBG_FS(dev, "PCIe:%s:disabled link\n",
+				__func__);
+		break;
+	case 2: /* enable link and recover config space for RC and EP */
+		PCIE_DBG_FS(dev,
+			"\n\nPCIe: RC%d: enable link and recover config space\n\n",
+			dev->rc_idx);
+		ret = msm_pcie_pm_control(MSM_PCIE_RESUME, 0,
+			dev->dev, NULL,
+			MSM_PCIE_CONFIG_NO_CFG_RESTORE);
+		if (ret)
+			PCIE_DBG_FS(dev, "PCIe:%s:failed to enable link\n",
+				__func__);
+		else {
+			PCIE_DBG_FS(dev, "PCIe:%s:enabled link\n", __func__);
+			msm_pcie_recover_config(dev->dev);
+		}
+		break;
+	case 3: /*
+		 * disable and enable link, recover config space for
+		 * RC and EP
+		 */
+		PCIE_DBG_FS(dev,
+			"\n\nPCIe: RC%d: disable and enable link then recover config space\n\n",
+			dev->rc_idx);
+		ret = msm_pcie_pm_control(MSM_PCIE_SUSPEND, 0,
+			dev->dev, NULL,
+			MSM_PCIE_CONFIG_NO_CFG_RESTORE);
+		if (ret)
+			PCIE_DBG_FS(dev, "PCIe:%s:failed to disable link\n",
+				__func__);
+		else
+			PCIE_DBG_FS(dev, "PCIe:%s:disabled link\n", __func__);
+		ret = msm_pcie_pm_control(MSM_PCIE_RESUME, 0,
+			dev->dev, NULL,
+			MSM_PCIE_CONFIG_NO_CFG_RESTORE);
+		if (ret)
+			PCIE_DBG_FS(dev, "PCIe:%s:failed to enable link\n",
+				__func__);
+		else {
+			PCIE_DBG_FS(dev, "PCIe:%s:enabled link\n", __func__);
+			msm_pcie_recover_config(dev->dev);
+		}
+		break;
+	case 4: /* dump shadow registers for RC and EP */
+		PCIE_DBG_FS(dev,
+			"\n\nPCIe: RC%d: dumping RC shadow registers\n",
+			dev->rc_idx);
+		msm_pcie_shadow_dump(dev, true);
+
+		PCIE_DBG_FS(dev,
+			"\n\nPCIe: RC%d: dumping EP shadow registers\n",
+			dev->rc_idx);
+		msm_pcie_shadow_dump(dev, false);
+		break;
+	case 5: /* disable L0s */
+		PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: disable L0s\n\n",
+			dev->rc_idx);
+		msm_pcie_write_mask(dev->dm_core +
+				PCIE20_CAP_LINKCTRLSTATUS,
+				BIT(0), 0);
+		msm_pcie_write_mask(dev->conf +
+				ep_link_ctrlstts_offset,
+				BIT(0), 0);
+		if (dev->shadow_en) {
+			dev->rc_shadow[PCIE20_CAP_LINKCTRLSTATUS / 4] =
+					readl_relaxed(dev->dm_core +
+					PCIE20_CAP_LINKCTRLSTATUS);
+			dev->ep_shadow[0][ep_link_ctrlstts_offset / 4] =
+					readl_relaxed(dev->conf +
+					ep_link_ctrlstts_offset);
+		}
+		PCIE_DBG_FS(dev, "PCIe: RC's CAP_LINKCTRLSTATUS:0x%x\n",
+			readl_relaxed(dev->dm_core +
+			PCIE20_CAP_LINKCTRLSTATUS));
+		PCIE_DBG_FS(dev, "PCIe: EP's CAP_LINKCTRLSTATUS:0x%x\n",
+			readl_relaxed(dev->conf +
+			ep_link_ctrlstts_offset));
+		break;
+	case 6: /* enable L0s */
+		PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: enable L0s\n\n",
+			dev->rc_idx);
+		msm_pcie_write_mask(dev->dm_core +
+				PCIE20_CAP_LINKCTRLSTATUS,
+				0, BIT(0));
+		msm_pcie_write_mask(dev->conf +
+				ep_link_ctrlstts_offset,
+				0, BIT(0));
+		if (dev->shadow_en) {
+			dev->rc_shadow[PCIE20_CAP_LINKCTRLSTATUS / 4] =
+					readl_relaxed(dev->dm_core +
+					PCIE20_CAP_LINKCTRLSTATUS);
+			dev->ep_shadow[0][ep_link_ctrlstts_offset / 4] =
+					readl_relaxed(dev->conf +
+					ep_link_ctrlstts_offset);
+		}
+		PCIE_DBG_FS(dev, "PCIe: RC's CAP_LINKCTRLSTATUS:0x%x\n",
+			readl_relaxed(dev->dm_core +
+			PCIE20_CAP_LINKCTRLSTATUS));
+		PCIE_DBG_FS(dev, "PCIe: EP's CAP_LINKCTRLSTATUS:0x%x\n",
+			readl_relaxed(dev->conf +
+			ep_link_ctrlstts_offset));
+		break;
+	case 7: /* disable L1 */
+		PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: disable L1\n\n",
+			dev->rc_idx);
+		msm_pcie_write_mask(dev->dm_core +
+				PCIE20_CAP_LINKCTRLSTATUS,
+				BIT(1), 0);
+		msm_pcie_write_mask(dev->conf +
+				ep_link_ctrlstts_offset,
+				BIT(1), 0);
+		if (dev->shadow_en) {
+			dev->rc_shadow[PCIE20_CAP_LINKCTRLSTATUS / 4] =
+					readl_relaxed(dev->dm_core +
+					PCIE20_CAP_LINKCTRLSTATUS);
+			dev->ep_shadow[0][ep_link_ctrlstts_offset / 4] =
+					readl_relaxed(dev->conf +
+					ep_link_ctrlstts_offset);
+		}
+		PCIE_DBG_FS(dev, "PCIe: RC's CAP_LINKCTRLSTATUS:0x%x\n",
+			readl_relaxed(dev->dm_core +
+			PCIE20_CAP_LINKCTRLSTATUS));
+		PCIE_DBG_FS(dev, "PCIe: EP's CAP_LINKCTRLSTATUS:0x%x\n",
+			readl_relaxed(dev->conf +
+			ep_link_ctrlstts_offset));
+		break;
+	case 8: /* enable L1 */
+		PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: enable L1\n\n",
+			dev->rc_idx);
+		msm_pcie_write_mask(dev->dm_core +
+				PCIE20_CAP_LINKCTRLSTATUS,
+				0, BIT(1));
+		msm_pcie_write_mask(dev->conf +
+				ep_link_ctrlstts_offset,
+				0, BIT(1));
+		if (dev->shadow_en) {
+			dev->rc_shadow[PCIE20_CAP_LINKCTRLSTATUS / 4] =
+					readl_relaxed(dev->dm_core +
+					PCIE20_CAP_LINKCTRLSTATUS);
+			dev->ep_shadow[0][ep_link_ctrlstts_offset / 4] =
+					readl_relaxed(dev->conf +
+					ep_link_ctrlstts_offset);
+		}
+		PCIE_DBG_FS(dev, "PCIe: RC's CAP_LINKCTRLSTATUS:0x%x\n",
+			readl_relaxed(dev->dm_core +
+			PCIE20_CAP_LINKCTRLSTATUS));
+		PCIE_DBG_FS(dev, "PCIe: EP's CAP_LINKCTRLSTATUS:0x%x\n",
+			readl_relaxed(dev->conf +
+			ep_link_ctrlstts_offset));
+		break;
+	case 9: /* disable L1ss */
+		PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: disable L1ss\n\n",
+			dev->rc_idx);
+		current_offset = PCIE_EXT_CAP_OFFSET;
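+		/* extended cap header: [15:0] cap ID, [31:20] next pointer */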
+		while (current_offset) {
+			val = readl_relaxed(dev->conf + current_offset);
+			if ((val & 0xffff) == L1SUB_CAP_ID) {
+				ep_l1sub_ctrl1_offset =
+						current_offset + 0x8;
+				break;
+			}
+			current_offset = val >> 20;
+		}
+		if (!ep_l1sub_ctrl1_offset) {
+			PCIE_DBG_FS(dev,
+				"PCIe: RC%d endpoint does not support l1ss registers\n",
+				dev->rc_idx);
+			break;
+		}
+
+		PCIE_DBG_FS(dev, "PCIe: RC%d: ep_l1sub_ctrl1_offset: 0x%x\n",
+				dev->rc_idx, ep_l1sub_ctrl1_offset);
+
+		msm_pcie_write_reg_field(dev->dm_core,
+					PCIE20_L1SUB_CONTROL1,
+					0xf, 0);
+		msm_pcie_write_mask(dev->dm_core +
+					PCIE20_DEVICE_CONTROL2_STATUS2,
+					BIT(10), 0);
+		msm_pcie_write_reg_field(dev->conf,
+					ep_l1sub_ctrl1_offset,
+					0xf, 0);
+		msm_pcie_write_mask(dev->conf +
+					ep_dev_ctrl2stts2_offset,
+					BIT(10), 0);
+		if (dev->shadow_en) {
+			dev->rc_shadow[PCIE20_L1SUB_CONTROL1 / 4] =
+				readl_relaxed(dev->dm_core +
+				PCIE20_L1SUB_CONTROL1);
+			dev->rc_shadow[PCIE20_DEVICE_CONTROL2_STATUS2 / 4] =
+				readl_relaxed(dev->dm_core +
+				PCIE20_DEVICE_CONTROL2_STATUS2);
+			dev->ep_shadow[0][ep_l1sub_ctrl1_offset / 4] =
+				readl_relaxed(dev->conf +
+				ep_l1sub_ctrl1_offset);
+			dev->ep_shadow[0][ep_dev_ctrl2stts2_offset / 4] =
+				readl_relaxed(dev->conf +
+				ep_dev_ctrl2stts2_offset);
+		}
+		PCIE_DBG_FS(dev, "PCIe: RC's L1SUB_CONTROL1:0x%x\n",
+			readl_relaxed(dev->dm_core +
+			PCIE20_L1SUB_CONTROL1));
+		PCIE_DBG_FS(dev, "PCIe: RC's DEVICE_CONTROL2_STATUS2:0x%x\n",
+			readl_relaxed(dev->dm_core +
+			PCIE20_DEVICE_CONTROL2_STATUS2));
+		PCIE_DBG_FS(dev, "PCIe: EP's L1SUB_CONTROL1:0x%x\n",
+			readl_relaxed(dev->conf +
+			ep_l1sub_ctrl1_offset));
+		PCIE_DBG_FS(dev, "PCIe: EP's DEVICE_CONTROL2_STATUS2:0x%x\n",
+			readl_relaxed(dev->conf +
+			ep_dev_ctrl2stts2_offset));
+		break;
+	case 10: /* enable L1ss */
+		PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: enable L1ss\n\n",
+			dev->rc_idx);
+		current_offset = PCIE_EXT_CAP_OFFSET;
+		while (current_offset) {
+			val = readl_relaxed(dev->conf + current_offset);
+			if ((val & 0xffff) == L1SUB_CAP_ID) {
+				ep_l1sub_cap_reg1_offset =
+						current_offset + 0x4;
+				ep_l1sub_ctrl1_offset =
+						current_offset + 0x8;
+				break;
+			}
+			current_offset = val >> 20;
+		}
+		if (!ep_l1sub_ctrl1_offset) {
+			PCIE_DBG_FS(dev,
+				"PCIe: RC%d endpoint does not support l1ss registers\n",
+				dev->rc_idx);
+			break;
+		}
+
+		val = readl_relaxed(dev->conf +
+				ep_l1sub_cap_reg1_offset);
+
+		PCIE_DBG_FS(dev, "PCIe: EP's L1SUB_CAPABILITY_REG_1: 0x%x\n",
+			val);
+		PCIE_DBG_FS(dev, "PCIe: RC%d: ep_l1sub_ctrl1_offset: 0x%x\n",
+			dev->rc_idx, ep_l1sub_ctrl1_offset);
+
+		val &= 0xf;
+
+		msm_pcie_write_reg_field(dev->dm_core,
+					PCIE20_L1SUB_CONTROL1,
+					0xf, val);
+		msm_pcie_write_mask(dev->dm_core +
+					PCIE20_DEVICE_CONTROL2_STATUS2,
+					0, BIT(10));
+		msm_pcie_write_reg_field(dev->conf,
+					ep_l1sub_ctrl1_offset,
+					0xf, val);
+		msm_pcie_write_mask(dev->conf +
+					ep_dev_ctrl2stts2_offset,
+					0, BIT(10));
+		if (dev->shadow_en) {
+			dev->rc_shadow[PCIE20_L1SUB_CONTROL1 / 4] =
+				readl_relaxed(dev->dm_core +
+					PCIE20_L1SUB_CONTROL1);
+			dev->rc_shadow[PCIE20_DEVICE_CONTROL2_STATUS2 / 4] =
+				readl_relaxed(dev->dm_core +
+				PCIE20_DEVICE_CONTROL2_STATUS2);
+			dev->ep_shadow[0][ep_l1sub_ctrl1_offset / 4] =
+				readl_relaxed(dev->conf +
+				ep_l1sub_ctrl1_offset);
+			dev->ep_shadow[0][ep_dev_ctrl2stts2_offset / 4] =
+				readl_relaxed(dev->conf +
+				ep_dev_ctrl2stts2_offset);
+		}
+		PCIE_DBG_FS(dev, "PCIe: RC's L1SUB_CONTROL1:0x%x\n",
+			readl_relaxed(dev->dm_core +
+			PCIE20_L1SUB_CONTROL1));
+		PCIE_DBG_FS(dev, "PCIe: RC's DEVICE_CONTROL2_STATUS2:0x%x\n",
+			readl_relaxed(dev->dm_core +
+			PCIE20_DEVICE_CONTROL2_STATUS2));
+		PCIE_DBG_FS(dev, "PCIe: EP's L1SUB_CONTROL1:0x%x\n",
+			readl_relaxed(dev->conf +
+			ep_l1sub_ctrl1_offset));
+		PCIE_DBG_FS(dev, "PCIe: EP's DEVICE_CONTROL2_STATUS2:0x%x\n",
+			readl_relaxed(dev->conf +
+			ep_dev_ctrl2stts2_offset));
+		break;
+	case 11: /* enumerate PCIe */
+		PCIE_DBG_FS(dev, "\n\nPCIe: attempting to enumerate RC%d\n\n",
+			dev->rc_idx);
+		if (dev->enumerated)
+			PCIE_DBG_FS(dev, "PCIe: RC%d is already enumerated\n",
+				dev->rc_idx);
+		else {
+			if (!msm_pcie_enumerate(dev->rc_idx))
+				PCIE_DBG_FS(dev,
+					"PCIe: RC%d is successfully enumerated\n",
+					dev->rc_idx);
+			else
+				PCIE_DBG_FS(dev,
+					"PCIe: RC%d enumeration failed\n",
+					dev->rc_idx);
+		}
+		break;
+	case 12: /* write a value to a register */
+		PCIE_DBG_FS(dev,
+			"\n\nPCIe: RC%d: writing a value to a register\n\n",
+			dev->rc_idx);
+
+		if (!base_sel) {
+			PCIE_DBG_FS(dev, "Invalid base_sel: 0x%x\n", base_sel);
+			break;
+		}
+
+		PCIE_DBG_FS(dev,
+			"base: %s: 0x%p\nwr_offset: 0x%x\nwr_mask: 0x%x\nwr_value: 0x%x\n",
+			dev->res[base_sel - 1].name,
+			dev->res[base_sel - 1].base,
+			wr_offset, wr_mask, wr_value);
+
+		msm_pcie_write_reg_field(dev->res[base_sel - 1].base,
+			wr_offset, wr_mask, wr_value);
+
+		break;
+	case 13: /* dump all registers of base_sel */
+		if (!base_sel) {
+			PCIE_DBG_FS(dev, "Invalid base_sel: 0x%x\n", base_sel);
+			break;
+		} else if (base_sel - 1 == MSM_PCIE_RES_PARF) {
+			pcie_parf_dump(dev);
+			break;
+		} else if (base_sel - 1 == MSM_PCIE_RES_PHY) {
+			pcie_phy_dump(dev);
+			break;
+		} else if (base_sel - 1 == MSM_PCIE_RES_CONF) {
+			base_sel_size = 0x1000;
+		} else {
+			base_sel_size = resource_size(
+				dev->res[base_sel - 1].resource);
+		}
+
+		PCIE_DBG_FS(dev, "\n\nPCIe: Dumping %s Registers for RC%d\n\n",
+			dev->res[base_sel - 1].name, dev->rc_idx);
+
+		for (i = 0; i < base_sel_size; i += 32) {
+			PCIE_DBG_FS(dev,
+			"0x%04x %08x %08x %08x %08x %08x %08x %08x %08x\n",
+			i, readl_relaxed(dev->res[base_sel - 1].base + i),
+			readl_relaxed(dev->res[base_sel - 1].base + (i + 4)),
+			readl_relaxed(dev->res[base_sel - 1].base + (i + 8)),
+			readl_relaxed(dev->res[base_sel - 1].base + (i + 12)),
+			readl_relaxed(dev->res[base_sel - 1].base + (i + 16)),
+			readl_relaxed(dev->res[base_sel - 1].base + (i + 20)),
+			readl_relaxed(dev->res[base_sel - 1].base + (i + 24)),
+			readl_relaxed(dev->res[base_sel - 1].base + (i + 28)));
+		}
+		break;
+	default:
+		PCIE_DBG_FS(dev, "Invalid testcase: %d.\n", testcase);
+		break;
+	}
+}
+
+int msm_pcie_debug_info(struct pci_dev *dev, u32 option, u32 base,
+			u32 offset, u32 mask, u32 value)
+{
+	int ret = 0;
+	struct msm_pcie_dev_t *pdev = NULL;
+
+	if (!dev) {
+		pr_err("PCIe: the input pci dev is NULL.\n");
+		return -ENODEV;
+	}
+
+	pdev = PCIE_BUS_PRIV_DATA(dev->bus);
+
+	if (option == 12 || option == 13) {
+		if (!base || base > 5) {
+			PCIE_DBG_FS(pdev, "Invalid base_sel: 0x%x\n", base);
+			PCIE_DBG_FS(pdev,
+				"PCIe: base_sel is still 0x%x\n", base_sel);
+			return -EINVAL;
+		}
+
+		base_sel = base;
+		PCIE_DBG_FS(pdev, "PCIe: base_sel is now 0x%x\n", base_sel);
+
+		if (option == 12) {
+			wr_offset = offset;
+			wr_mask = mask;
+			wr_value = value;
+
+			PCIE_DBG_FS(pdev,
+				"PCIe: wr_offset is now 0x%x\n", wr_offset);
+			PCIE_DBG_FS(pdev,
+				"PCIe: wr_mask is now 0x%x\n", wr_mask);
+			PCIE_DBG_FS(pdev,
+				"PCIe: wr_value is now 0x%x\n", wr_value);
+		}
+	}
+
+	rc_sel = 1 << pdev->rc_idx;
+
+	msm_pcie_sel_debug_testcase(pdev, option);
+
+	return ret;
+}
+EXPORT_SYMBOL(msm_pcie_debug_info);
+
+#ifdef CONFIG_DEBUG_FS
+static struct dentry *dent_msm_pcie;
+static struct dentry *dfile_rc_sel;
+static struct dentry *dfile_case;
+static struct dentry *dfile_base_sel;
+static struct dentry *dfile_linkdown_panic;
+static struct dentry *dfile_wr_offset;
+static struct dentry *dfile_wr_mask;
+static struct dentry *dfile_wr_value;
+static struct dentry *dfile_ep_wakeirq;
+static struct dentry *dfile_aer_enable;
+static struct dentry *dfile_corr_counter_limit;
+
+static u32 rc_sel_max;
+
+static ssize_t msm_pcie_cmd_debug(struct file *file,
+				const char __user *buf,
+				size_t count, loff_t *ppos)
+{
+	unsigned long ret;
+	char str[MAX_MSG_LEN];
+	unsigned int testcase = 0;
+	int i;
+
+	memset(str, 0, sizeof(str));
+	ret = copy_from_user(str, buf, min(count, sizeof(str) - 1));
+	if (ret)
+		return -EFAULT;
+
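+	/* parse a leading unsigned decimal testcase number */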
+	for (i = 0; i < sizeof(str) && (str[i] >= '0') && (str[i] <= '9'); ++i)
+		testcase = (testcase * 10) + (str[i] - '0');
+
+	if (!rc_sel)
+		rc_sel = 1;
+
+	pr_alert("PCIe: TEST: %d\n", testcase);
+
+	for (i = 0; i < MAX_RC_NUM; i++) {
+		if (!((rc_sel >> i) & 0x1))
+			continue;
+		msm_pcie_sel_debug_testcase(&msm_pcie_dev[i], testcase);
+	}
+
+	return count;
+}
+
+const struct file_operations msm_pcie_cmd_debug_ops = {
+	.write = msm_pcie_cmd_debug,
+};
+
+static ssize_t msm_pcie_set_rc_sel(struct file *file,
+				const char __user *buf,
+				size_t count, loff_t *ppos)
+{
+	unsigned long ret;
+	char str[MAX_MSG_LEN];
+	int i;
+	u32 new_rc_sel = 0;
+
+	memset(str, 0, sizeof(str));
+	ret = copy_from_user(str, buf, min(count, sizeof(str) - 1));
+	if (ret)
+		return -EFAULT;
+
+	for (i = 0; i < sizeof(str) && (str[i] >= '0') && (str[i] <= '9'); ++i)
+		new_rc_sel = (new_rc_sel * 10) + (str[i] - '0');
+
+	if ((!new_rc_sel) || (new_rc_sel > rc_sel_max)) {
+		pr_alert("PCIe: invalid value for rc_sel: 0x%x\n", new_rc_sel);
+		pr_alert("PCIe: rc_sel is still 0x%x\n", rc_sel ? rc_sel : 0x1);
+	} else {
+		rc_sel = new_rc_sel;
+		pr_alert("PCIe: rc_sel is now: 0x%x\n", rc_sel);
+	}
+
+	pr_alert("PCIe: the following RC(s) will be tested:\n");
+	for (i = 0; i < MAX_RC_NUM; i++) {
+		if (!rc_sel) {
+			pr_alert("RC %d\n", i);
+			break;
+		} else if (rc_sel & (1 << i)) {
+			pr_alert("RC %d\n", i);
+		}
+	}
+
+	return count;
+}
+
+const struct file_operations msm_pcie_rc_sel_ops = {
+	.write = msm_pcie_set_rc_sel,
+};
+
+static ssize_t msm_pcie_set_base_sel(struct file *file,
+				const char __user *buf,
+				size_t count, loff_t *ppos)
+{
+	unsigned long ret;
+	char str[MAX_MSG_LEN];
+	int i;
+	u32 new_base_sel = 0;
+	char *base_sel_name;
+
+	memset(str, 0, sizeof(str));
+	ret = copy_from_user(str, buf, min(count, sizeof(str) - 1));
+	if (ret)
+		return -EFAULT;
+
+	for (i = 0; i < sizeof(str) && (str[i] >= '0') && (str[i] <= '9'); ++i)
+		new_base_sel = (new_base_sel * 10) + (str[i] - '0');
+
+	if (!new_base_sel || new_base_sel > 5) {
+		pr_alert("PCIe: invalid value for base_sel: 0x%x\n",
+			new_base_sel);
+		pr_alert("PCIe: base_sel is still 0x%x\n", base_sel);
+	} else {
+		base_sel = new_base_sel;
+		pr_alert("PCIe: base_sel is now 0x%x\n", base_sel);
+	}
+
+	switch (base_sel) {
+	case 1:
+		base_sel_name = "PARF";
+		break;
+	case 2:
+		base_sel_name = "PHY";
+		break;
+	case 3:
+		base_sel_name = "RC CONFIG SPACE";
+		break;
+	case 4:
+		base_sel_name = "ELBI";
+		break;
+	case 5:
+		base_sel_name = "EP CONFIG SPACE";
+		break;
+	default:
+		base_sel_name = "INVALID";
+		break;
+	}
+
+	pr_alert("%s\n", base_sel_name);
+
+	return count;
+}
+
+const struct file_operations msm_pcie_base_sel_ops = {
+	.write = msm_pcie_set_base_sel,
+};
+
+static ssize_t msm_pcie_set_linkdown_panic(struct file *file,
+				const char __user *buf,
+				size_t count, loff_t *ppos)
+{
+	unsigned long ret;
+	char str[MAX_MSG_LEN];
+	u32 new_linkdown_panic = 0;
+	int i;
+
+	memset(str, 0, sizeof(str));
+	ret = copy_from_user(str, buf, min(count, sizeof(str) - 1));
+	if (ret)
+		return -EFAULT;
+
+	for (i = 0; i < sizeof(str) && (str[i] >= '0') && (str[i] <= '9'); ++i)
+		new_linkdown_panic = (new_linkdown_panic * 10) + (str[i] - '0');
+
+	if (new_linkdown_panic <= 1) {
+		for (i = 0; i < MAX_RC_NUM; i++) {
+			if (!rc_sel) {
+				msm_pcie_dev[0].linkdown_panic =
+					new_linkdown_panic;
+				PCIE_DBG_FS(&msm_pcie_dev[0],
+					"PCIe: RC0: linkdown_panic is now %d\n",
+					msm_pcie_dev[0].linkdown_panic);
+				break;
+			} else if (rc_sel & (1 << i)) {
+				msm_pcie_dev[i].linkdown_panic =
+					new_linkdown_panic;
+				PCIE_DBG_FS(&msm_pcie_dev[i],
+					"PCIe: RC%d: linkdown_panic is now %d\n",
+					i, msm_pcie_dev[i].linkdown_panic);
+			}
+		}
+	} else {
+		pr_err("PCIe: Invalid input for linkdown_panic: %d. Please enter 0 or 1.\n",
+			new_linkdown_panic);
+	}
+
+	return count;
+}
+
+const struct file_operations msm_pcie_linkdown_panic_ops = {
+	.write = msm_pcie_set_linkdown_panic,
+};
+
+static ssize_t msm_pcie_set_wr_offset(struct file *file,
+				const char __user *buf,
+				size_t count, loff_t *ppos)
+{
+	unsigned long ret;
+	char str[MAX_MSG_LEN];
+	int i;
+
+	memset(str, 0, sizeof(str));
+	ret = copy_from_user(str, buf, sizeof(str));
+	if (ret)
+		return -EFAULT;
+
+	wr_offset = 0;
+	for (i = 0; i < sizeof(str) && (str[i] >= '0') && (str[i] <= '9'); ++i)
+		wr_offset = (wr_offset * 10) + (str[i] - '0');
+
+	pr_alert("PCIe: wr_offset is now 0x%x\n", wr_offset);
+
+	return count;
+}
+
+const struct file_operations msm_pcie_wr_offset_ops = {
+	.write = msm_pcie_set_wr_offset,
+};
+
+static ssize_t msm_pcie_set_wr_mask(struct file *file,
+				const char __user *buf,
+				size_t count, loff_t *ppos)
+{
+	unsigned long ret;
+	char str[MAX_MSG_LEN];
+	int i;
+
+	memset(str, 0, sizeof(str));
+	ret = copy_from_user(str, buf, sizeof(str));
+	if (ret)
+		return -EFAULT;
+
+	wr_mask = 0;
+	for (i = 0; i < sizeof(str) && (str[i] >= '0') && (str[i] <= '9'); ++i)
+		wr_mask = (wr_mask * 10) + (str[i] - '0');
+
+	pr_alert("PCIe: wr_mask is now 0x%x\n", wr_mask);
+
+	return count;
+}
+
+const struct file_operations msm_pcie_wr_mask_ops = {
+	.write = msm_pcie_set_wr_mask,
+};
+static ssize_t msm_pcie_set_wr_value(struct file *file,
+				const char __user *buf,
+				size_t count, loff_t *ppos)
+{
+	unsigned long ret;
+	char str[MAX_MSG_LEN];
+	int i;
+
+	memset(str, 0, sizeof(str));
+	ret = copy_from_user(str, buf, sizeof(str));
+	if (ret)
+		return -EFAULT;
+
+	wr_value = 0;
+	for (i = 0; i < sizeof(str) && (str[i] >= '0') && (str[i] <= '9'); ++i)
+		wr_value = (wr_value * 10) + (str[i] - '0');
+
+	pr_alert("PCIe: wr_value is now 0x%x\n", wr_value);
+
+	return count;
+}
+
+const struct file_operations msm_pcie_wr_value_ops = {
+	.write = msm_pcie_set_wr_value,
+};
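+
+/*
+ * wr_offset, wr_mask and wr_value together describe a register write
+ * within the space chosen by base_sel; the exact read-modify-write
+ * semantics live in the test-case handler, which is not part of this
+ * hunk.
+ */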
+
+static ssize_t msm_pcie_set_ep_wakeirq(struct file *file,
+				const char __user *buf,
+				size_t count, loff_t *ppos)
+{
+	unsigned long ret;
+	char str[MAX_MSG_LEN];
+	u32 new_ep_wakeirq = 0;
+	int i;
+
+	memset(str, 0, sizeof(str));
+	ret = copy_from_user(str, buf, sizeof(str));
+	if (ret)
+		return -EFAULT;
+
+	for (i = 0; i < sizeof(str) && (str[i] >= '0') && (str[i] <= '9'); ++i)
+		new_ep_wakeirq = (new_ep_wakeirq * 10) + (str[i] - '0');
+
+	if (new_ep_wakeirq <= 1) {
+		for (i = 0; i < MAX_RC_NUM; i++) {
+			if (!rc_sel) {
+				msm_pcie_dev[0].ep_wakeirq = new_ep_wakeirq;
+				PCIE_DBG_FS(&msm_pcie_dev[0],
+					"PCIe: RC0: ep_wakeirq is now %d\n",
+					msm_pcie_dev[0].ep_wakeirq);
+				break;
+			} else if (rc_sel & (1 << i)) {
+				msm_pcie_dev[i].ep_wakeirq = new_ep_wakeirq;
+				PCIE_DBG_FS(&msm_pcie_dev[i],
+					"PCIe: RC%d: ep_wakeirq is now %d\n",
+					i, msm_pcie_dev[i].ep_wakeirq);
+			}
+		}
+	} else {
+		pr_err("PCIe: Invalid input for ep_wakeirq: %d. Please enter 0 or 1.\n",
+			new_ep_wakeirq);
+	}
+
+	return count;
+}
+
+const struct file_operations msm_pcie_ep_wakeirq_ops = {
+	.write = msm_pcie_set_ep_wakeirq,
+};
+
+static ssize_t msm_pcie_set_aer_enable(struct file *file,
+				const char __user *buf,
+				size_t count, loff_t *ppos)
+{
+	unsigned long ret;
+	char str[MAX_MSG_LEN];
+	u32 new_aer_enable = 0;
+	u32 temp_rc_sel;
+	int i;
+
+	memset(str, 0, sizeof(str));
+	ret = copy_from_user(str, buf, sizeof(str));
+	if (ret)
+		return -EFAULT;
+
+	for (i = 0; i < sizeof(str) && (str[i] >= '0') && (str[i] <= '9'); ++i)
+		new_aer_enable = (new_aer_enable * 10) + (str[i] - '0');
+
+	if (new_aer_enable > 1) {
+		pr_err(
+			"PCIe: Invalid input for aer_enable: %d. Please enter 0 or 1.\n",
+			new_aer_enable);
+		return count;
+	}
+
+	if (rc_sel)
+		temp_rc_sel = rc_sel;
+	else
+		temp_rc_sel = 0x1;
+
+	for (i = 0; i < MAX_RC_NUM; i++) {
+		if (temp_rc_sel & (1 << i)) {
+			msm_pcie_dev[i].aer_enable = new_aer_enable;
+			PCIE_DBG_FS(&msm_pcie_dev[i],
+				"PCIe: RC%d: aer_enable is now %d\n",
+				i, msm_pcie_dev[i].aer_enable);
+
+			msm_pcie_write_mask(msm_pcie_dev[i].dm_core +
+					PCIE20_BRIDGE_CTRL,
+					new_aer_enable ? 0 : BIT(16),
+					new_aer_enable ? BIT(16) : 0);
+
+			PCIE_DBG_FS(&msm_pcie_dev[i],
+				"RC%d: PCIE20_BRIDGE_CTRL: 0x%x\n", i,
+				readl_relaxed(msm_pcie_dev[i].dm_core +
+					PCIE20_BRIDGE_CTRL));
+		}
+	}
+
+	return count;
+}
+
+const struct file_operations msm_pcie_aer_enable_ops = {
+	.write = msm_pcie_set_aer_enable,
+};
+
+static ssize_t msm_pcie_set_corr_counter_limit(struct file *file,
+				const char __user *buf,
+				size_t count, loff_t *ppos)
+{
+	unsigned long ret;
+	char str[MAX_MSG_LEN];
+	int i;
+
+	memset(str, 0, sizeof(str));
+	ret = copy_from_user(str, buf, sizeof(str));
+	if (ret)
+		return -EFAULT;
+
+	corr_counter_limit = 0;
+	for (i = 0; i < sizeof(str) && (str[i] >= '0') && (str[i] <= '9'); ++i)
+		corr_counter_limit = (corr_counter_limit * 10) + (str[i] - '0');
+
+	pr_info("PCIe: corr_counter_limit is now %lu\n", corr_counter_limit);
+
+	return count;
+}
+
+const struct file_operations msm_pcie_corr_counter_limit_ops = {
+	.write = msm_pcie_set_corr_counter_limit,
+};
+
+static void msm_pcie_debugfs_init(void)
+{
+	rc_sel_max = (0x1 << MAX_RC_NUM) - 1;
+	wr_mask = 0xffffffff;
+
+	dent_msm_pcie = debugfs_create_dir("pci-msm", NULL);
+	if (!dent_msm_pcie || IS_ERR(dent_msm_pcie)) {
+		pr_err("PCIe: fail to create the folder for debug_fs.\n");
+		return;
+	}
+
+	dfile_rc_sel = debugfs_create_file("rc_sel", 0664,
+					dent_msm_pcie, 0,
+					&msm_pcie_rc_sel_ops);
+	if (!dfile_rc_sel || IS_ERR(dfile_rc_sel)) {
+		pr_err("PCIe: fail to create the file for debug_fs rc_sel.\n");
+		goto rc_sel_error;
+	}
+
+	dfile_case = debugfs_create_file("case", 0664,
+					dent_msm_pcie, 0,
+					&msm_pcie_cmd_debug_ops);
+	if (!dfile_case || IS_ERR(dfile_case)) {
+		pr_err("PCIe: fail to create the file for debug_fs case.\n");
+		goto case_error;
+	}
+
+	dfile_base_sel = debugfs_create_file("base_sel", 0664,
+					dent_msm_pcie, 0,
+					&msm_pcie_base_sel_ops);
+	if (!dfile_base_sel || IS_ERR(dfile_base_sel)) {
+		pr_err("PCIe: fail to create the file for debug_fs base_sel.\n");
+		goto base_sel_error;
+	}
+
+	dfile_linkdown_panic = debugfs_create_file("linkdown_panic", 0644,
+					dent_msm_pcie, 0,
+					&msm_pcie_linkdown_panic_ops);
+	if (!dfile_linkdown_panic || IS_ERR(dfile_linkdown_panic)) {
+		pr_err("PCIe: fail to create the file for debug_fs linkdown_panic.\n");
+		goto linkdown_panic_error;
+	}
+
+	dfile_wr_offset = debugfs_create_file("wr_offset", 0664,
+					dent_msm_pcie, 0,
+					&msm_pcie_wr_offset_ops);
+	if (!dfile_wr_offset || IS_ERR(dfile_wr_offset)) {
+		pr_err("PCIe: fail to create the file for debug_fs wr_offset.\n");
+		goto wr_offset_error;
+	}
+
+	dfile_wr_mask = debugfs_create_file("wr_mask", 0664,
+					dent_msm_pcie, 0,
+					&msm_pcie_wr_mask_ops);
+	if (!dfile_wr_mask || IS_ERR(dfile_wr_mask)) {
+		pr_err("PCIe: fail to create the file for debug_fs wr_mask.\n");
+		goto wr_mask_error;
+	}
+
+	dfile_wr_value = debugfs_create_file("wr_value", 0664,
+					dent_msm_pcie, 0,
+					&msm_pcie_wr_value_ops);
+	if (!dfile_wr_value || IS_ERR(dfile_wr_value)) {
+		pr_err("PCIe: fail to create the file for debug_fs wr_value.\n");
+		goto wr_value_error;
+	}
+
+	dfile_ep_wakeirq = debugfs_create_file("ep_wakeirq", 0664,
+					dent_msm_pcie, 0,
+					&msm_pcie_ep_wakeirq_ops);
+	if (!dfile_ep_wakeirq || IS_ERR(dfile_ep_wakeirq)) {
+		pr_err("PCIe: fail to create the file for debug_fs ep_wakeirq.\n");
+		goto ep_wakeirq_error;
+	}
+
+	dfile_aer_enable = debugfs_create_file("aer_enable", 0664,
+					dent_msm_pcie, 0,
+					&msm_pcie_aer_enable_ops);
+	if (!dfile_aer_enable || IS_ERR(dfile_aer_enable)) {
+		pr_err("PCIe: fail to create the file for debug_fs aer_enable.\n");
+		goto aer_enable_error;
+	}
+
+	dfile_corr_counter_limit = debugfs_create_file("corr_counter_limit",
+					0664, dent_msm_pcie, 0,
+					&msm_pcie_corr_counter_limit_ops);
+	if (!dfile_corr_counter_limit || IS_ERR(dfile_corr_counter_limit)) {
+		pr_err("PCIe: fail to create the file for debug_fs corr_counter_limit.\n");
+		goto corr_counter_limit_error;
+	}
+	return;
+
+corr_counter_limit_error:
+	debugfs_remove(dfile_aer_enable);
+aer_enable_error:
+	debugfs_remove(dfile_ep_wakeirq);
+ep_wakeirq_error:
+	debugfs_remove(dfile_wr_value);
+wr_value_error:
+	debugfs_remove(dfile_wr_mask);
+wr_mask_error:
+	debugfs_remove(dfile_wr_offset);
+wr_offset_error:
+	debugfs_remove(dfile_linkdown_panic);
+linkdown_panic_error:
+	debugfs_remove(dfile_base_sel);
+base_sel_error:
+	debugfs_remove(dfile_case);
+case_error:
+	debugfs_remove(dfile_rc_sel);
+rc_sel_error:
+	debugfs_remove(dent_msm_pcie);
+}
+
+static void msm_pcie_debugfs_exit(void)
+{
+	debugfs_remove(dfile_rc_sel);
+	debugfs_remove(dfile_case);
+	debugfs_remove(dfile_base_sel);
+	debugfs_remove(dfile_linkdown_panic);
+	debugfs_remove(dfile_wr_offset);
+	debugfs_remove(dfile_wr_mask);
+	debugfs_remove(dfile_wr_value);
+	debugfs_remove(dfile_ep_wakeirq);
+	debugfs_remove(dfile_aer_enable);
+	debugfs_remove(dfile_corr_counter_limit);
+}
+#else
+static void msm_pcie_debugfs_init(void)
+{
+}
+
+static void msm_pcie_debugfs_exit(void)
+{
+}
+#endif
+
+static inline int msm_pcie_is_link_up(struct msm_pcie_dev_t *dev)
+{
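+	/*
+	 * Bit 29 of the dword at PCIE20_CAP_LINKCTRLSTATUS is Link Status
+	 * bit 13, i.e. Data Link Layer Link Active (assuming the standard
+	 * PCIe capability layout of Link Control in the low half-word and
+	 * Link Status in the high half-word).
+	 */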
+	return readl_relaxed(dev->dm_core +
+			PCIE20_CAP_LINKCTRLSTATUS) & BIT(29);
+}
+
+/**
+ * msm_pcie_iatu_config - configure outbound address translation region
+ * @dev: root complex
+ * @nr: region number
+ * @type: target transaction type, see PCIE20_CTRL1_TYPE_xxx
+ * @host_addr: region start address on host
+ * @host_end: region end address (low 32 bits) on host;
+ *	upper 32 bits are the same as for @host_addr
+ * @target_addr: region start address on target
+ */
+static void msm_pcie_iatu_config(struct msm_pcie_dev_t *dev, int nr, u8 type,
+				unsigned long host_addr, u32 host_end,
+				unsigned long target_addr)
+{
+	void __iomem *pcie20 = dev->dm_core;
+
+	if (dev->shadow_en) {
+		dev->rc_shadow[PCIE20_PLR_IATU_VIEWPORT / 4] =
+			nr;
+		dev->rc_shadow[PCIE20_PLR_IATU_CTRL1 / 4] =
+			type;
+		dev->rc_shadow[PCIE20_PLR_IATU_LBAR / 4] =
+			lower_32_bits(host_addr);
+		dev->rc_shadow[PCIE20_PLR_IATU_UBAR / 4] =
+			upper_32_bits(host_addr);
+		dev->rc_shadow[PCIE20_PLR_IATU_LAR / 4] =
+			host_end;
+		dev->rc_shadow[PCIE20_PLR_IATU_LTAR / 4] =
+			lower_32_bits(target_addr);
+		dev->rc_shadow[PCIE20_PLR_IATU_UTAR / 4] =
+			upper_32_bits(target_addr);
+		dev->rc_shadow[PCIE20_PLR_IATU_CTRL2 / 4] =
+			BIT(31);
+	}
+
+	/* select region */
+	writel_relaxed(nr, pcie20 + PCIE20_PLR_IATU_VIEWPORT);
+	/* ensure the viewport selection is latched before programming it */
+	wmb();
+
+	/* switch off region before changing it */
+	writel_relaxed(0, pcie20 + PCIE20_PLR_IATU_CTRL2);
+	/* and wait till it propagates to the hardware */
+	wmb();
+
+	writel_relaxed(type, pcie20 + PCIE20_PLR_IATU_CTRL1);
+	writel_relaxed(lower_32_bits(host_addr),
+		       pcie20 + PCIE20_PLR_IATU_LBAR);
+	writel_relaxed(upper_32_bits(host_addr),
+		       pcie20 + PCIE20_PLR_IATU_UBAR);
+	writel_relaxed(host_end, pcie20 + PCIE20_PLR_IATU_LAR);
+	writel_relaxed(lower_32_bits(target_addr),
+		       pcie20 + PCIE20_PLR_IATU_LTAR);
+	writel_relaxed(upper_32_bits(target_addr),
+		       pcie20 + PCIE20_PLR_IATU_UTAR);
+	/* ensure that changes propagated to the hardware */
+	wmb();
+	writel_relaxed(BIT(31), pcie20 + PCIE20_PLR_IATU_CTRL2);
+
+	/* ensure that changes propagated to the hardware */
+	wmb();
+
+	if (dev->enumerated) {
+		PCIE_DBG2(dev, "IATU for Endpoint %02x:%02x.%01x\n",
+			dev->pcidev_table[nr].bdf >> 24,
+			dev->pcidev_table[nr].bdf >> 19 & 0x1f,
+			dev->pcidev_table[nr].bdf >> 16 & 0x07);
+		PCIE_DBG2(dev, "PCIE20_PLR_IATU_VIEWPORT:0x%x\n",
+			readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_VIEWPORT));
+		PCIE_DBG2(dev, "PCIE20_PLR_IATU_CTRL1:0x%x\n",
+			readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_CTRL1));
+		PCIE_DBG2(dev, "PCIE20_PLR_IATU_LBAR:0x%x\n",
+			readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_LBAR));
+		PCIE_DBG2(dev, "PCIE20_PLR_IATU_UBAR:0x%x\n",
+			readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_UBAR));
+		PCIE_DBG2(dev, "PCIE20_PLR_IATU_LAR:0x%x\n",
+			readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_LAR));
+		PCIE_DBG2(dev, "PCIE20_PLR_IATU_LTAR:0x%x\n",
+			readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_LTAR));
+		PCIE_DBG2(dev, "PCIE20_PLR_IATU_UTAR:0x%x\n",
+			readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_UTAR));
+		PCIE_DBG2(dev, "PCIE20_PLR_IATU_CTRL2:0x%x\n\n",
+			readl_relaxed(dev->dm_core + PCIE20_PLR_IATU_CTRL2));
+	}
+}
+
+/**
+ * msm_pcie_cfg_bdf - configure for config access
+ * @dev: root complex
+ * @bus: PCI bus number
+ * @devfn: PCI dev and function number
+ *
+ * Remap region 0, if required, for a config access of the proper type
+ * (CFG0 for bus 1, CFG1 for other buses).
+ * Cache the current device bdf to skip redundant remaps.
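+ *
+ * For example, bus 1 / devfn 0 yields bdf 0x01000000: BDF_OFFSET() packs
+ * the bus number into bits 31:24 and devfn into bits 23:16, matching the
+ * decoding in msm_pcie_iatu_config() above.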
+ */
+static void msm_pcie_cfg_bdf(struct msm_pcie_dev_t *dev, u8 bus, u8 devfn)
+{
+	struct resource *axi_conf = dev->res[MSM_PCIE_RES_CONF].resource;
+	u32 bdf  = BDF_OFFSET(bus, devfn);
+	u8 type = bus == 1 ? PCIE20_CTRL1_TYPE_CFG0 : PCIE20_CTRL1_TYPE_CFG1;
+
+	if (dev->current_bdf == bdf)
+		return;
+
+	msm_pcie_iatu_config(dev, 0, type,
+			axi_conf->start,
+			axi_conf->start + SZ_4K - 1,
+			bdf);
+
+	dev->current_bdf = bdf;
+}
+
+static inline void msm_pcie_save_shadow(struct msm_pcie_dev_t *dev,
+					u32 word_offset, u32 wr_val,
+					u32 bdf, bool rc)
+{
+	int i, j;
+	u32 max_dev = MAX_RC_NUM * MAX_DEVICE_NUM;
+
+	if (rc) {
+		dev->rc_shadow[word_offset / 4] = wr_val;
+	} else {
+		for (i = 0; i < MAX_DEVICE_NUM; i++) {
+			if (!dev->pcidev_table[i].bdf) {
+				for (j = 0; j < max_dev; j++)
+					if (!msm_pcie_dev_tbl[j].bdf) {
+						msm_pcie_dev_tbl[j].bdf = bdf;
+						break;
+					}
+				dev->pcidev_table[i].bdf = bdf;
+				if ((!dev->bridge_found) && (i > 0))
+					dev->bridge_found = true;
+			}
+			if (dev->pcidev_table[i].bdf == bdf) {
+				dev->ep_shadow[i][word_offset / 4] = wr_val;
+				break;
+			}
+		}
+	}
+}
+
+static inline int msm_pcie_oper_conf(struct pci_bus *bus, u32 devfn, int oper,
+				     int where, int size, u32 *val)
+{
+	uint32_t word_offset, byte_offset, mask;
+	uint32_t rd_val, wr_val;
+	struct msm_pcie_dev_t *dev;
+	void __iomem *config_base;
+	bool rc = false;
+	u32 rc_idx;
+	int rv = 0;
+	u32 bdf = BDF_OFFSET(bus->number, devfn);
+	int i;
+
+	dev = PCIE_BUS_PRIV_DATA(bus);
+
+	if (!dev) {
+		pr_err("PCIe: No device found for this bus.\n");
+		*val = ~0;
+		rv = PCIBIOS_DEVICE_NOT_FOUND;
+		goto out;
+	}
+
+	rc_idx = dev->rc_idx;
+	rc = (bus->number == 0);
+
+	spin_lock_irqsave(&dev->cfg_lock, dev->irqsave_flags);
+
+	if (!dev->cfg_access) {
+		PCIE_DBG3(dev,
+			"Access denied for RC%d %d:0x%02x + 0x%04x[%d]\n",
+			rc_idx, bus->number, devfn, where, size);
+		*val = ~0;
+		rv = PCIBIOS_DEVICE_NOT_FOUND;
+		goto unlock;
+	}
+
+	if (rc && (devfn != 0)) {
+		PCIE_DBG3(dev, "RC%d invalid %s - bus %d devfn %d\n", rc_idx,
+			 (oper == RD) ? "rd" : "wr", bus->number, devfn);
+		*val = ~0;
+		rv = PCIBIOS_DEVICE_NOT_FOUND;
+		goto unlock;
+	}
+
+	if (dev->link_status != MSM_PCIE_LINK_ENABLED) {
+		PCIE_DBG3(dev,
+			"Access to RC%d %d:0x%02x + 0x%04x[%d] is denied because link is down\n",
+			rc_idx, bus->number, devfn, where, size);
+		*val = ~0;
+		rv = PCIBIOS_DEVICE_NOT_FOUND;
+		goto unlock;
+	}
+
+	/* check if the link is up for endpoint */
+	if (!rc && !msm_pcie_is_link_up(dev)) {
+		PCIE_ERR(dev,
+			"PCIe: RC%d %s fail, link down - bus %d devfn %d\n",
+			rc_idx, (oper == RD) ? "rd" : "wr",
+			bus->number, devfn);
+		*val = ~0;
+		rv = PCIBIOS_DEVICE_NOT_FOUND;
+		goto unlock;
+	}
+
+	if (!rc && !dev->enumerated)
+		msm_pcie_cfg_bdf(dev, bus->number, devfn);
+
+	word_offset = where & ~0x3;
+	byte_offset = where & 0x3;
+	mask = (~0U >> (8 * (4 - size))) << (8 * byte_offset);
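+	/*
+	 * e.g. a 2-byte access at where = 0x3e gives word_offset = 0x3c,
+	 * byte_offset = 2 and mask = 0xffff0000, so only the upper
+	 * half-word of the aligned dword is touched.
+	 */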
+
+	if (rc || !dev->enumerated) {
+		config_base = rc ? dev->dm_core : dev->conf;
+	} else {
+		for (i = 0; i < MAX_DEVICE_NUM; i++) {
+			if (dev->pcidev_table[i].bdf == bdf) {
+				config_base = dev->pcidev_table[i].conf_base;
+				break;
+			}
+		}
+		if (i == MAX_DEVICE_NUM) {
+			*val = ~0;
+			rv = PCIBIOS_DEVICE_NOT_FOUND;
+			goto unlock;
+		}
+	}
+
+	rd_val = readl_relaxed(config_base + word_offset);
+
+	if (oper == RD) {
+		*val = ((rd_val & mask) >> (8 * byte_offset));
+		PCIE_DBG3(dev,
+			"RC%d %d:0x%02x + 0x%04x[%d] -> 0x%08x; rd 0x%08x\n",
+			rc_idx, bus->number, devfn, where, size, *val, rd_val);
+	} else {
+		wr_val = (rd_val & ~mask) |
+				((*val << (8 * byte_offset)) & mask);
+
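+		/*
+		 * On the root bus, the dword at 0x3c covers Interrupt
+		 * Line/Pin and Bridge Control; bits 17:16 are Bridge
+		 * Control[1:0] (parity error response and SERR# enable),
+		 * which are forced on for every write to this dword.
+		 */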
+		if ((bus->number == 0) && (where == 0x3c))
+			wr_val = wr_val | (3 << 16);
+
+		writel_relaxed(wr_val, config_base + word_offset);
+		wmb(); /* ensure config data is written to hardware register */
+
+		if (rd_val == PCIE_LINK_DOWN)
+			PCIE_ERR(dev,
+				"Read of RC%d %d:0x%02x + 0x%04x[%d] is all FFs\n",
+				rc_idx, bus->number, devfn, where, size);
+		else if (dev->shadow_en)
+			msm_pcie_save_shadow(dev, word_offset, wr_val, bdf, rc);
+
+		PCIE_DBG3(dev,
+			"RC%d %d:0x%02x + 0x%04x[%d] <- 0x%08x; rd 0x%08x val 0x%08x\n",
+			rc_idx, bus->number, devfn, where, size,
+			wr_val, rd_val, *val);
+	}
+
+unlock:
+	spin_unlock_irqrestore(&dev->cfg_lock, dev->irqsave_flags);
+out:
+	return rv;
+}
+
+static int msm_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
+			    int size, u32 *val)
+{
+	int ret = msm_pcie_oper_conf(bus, devfn, RD, where, size, val);
+
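+	/*
+	 * The class code read from the core is overridden for the root
+	 * bus so that the PCI core treats the RC as a PCI-to-PCI bridge
+	 * and enumerates the devices behind it.
+	 */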
+	if ((bus->number == 0) && (where == PCI_CLASS_REVISION)) {
+		*val = (*val & 0xff) | (PCI_CLASS_BRIDGE_PCI << 16);
+		PCIE_GEN_DBG("change class for RC:0x%x\n", *val);
+	}
+
+	return ret;
+}
+
+static int msm_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
+			    int where, int size, u32 val)
+{
+	return msm_pcie_oper_conf(bus, devfn, WR, where, size, &val);
+}
+
+static struct pci_ops msm_pcie_ops = {
+	.read = msm_pcie_rd_conf,
+	.write = msm_pcie_wr_conf,
+};
+
+static int msm_pcie_gpio_init(struct msm_pcie_dev_t *dev)
+{
+	int rc = 0, i;
+	struct msm_pcie_gpio_info_t *info;
+
+	PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
+
+	for (i = 0; i < dev->gpio_n; i++) {
+		info = &dev->gpio[i];
+
+		if (!info->num)
+			continue;
+
+		rc = gpio_request(info->num, info->name);
+		if (rc) {
+			PCIE_ERR(dev, "PCIe: RC%d can't get gpio %s; %d\n",
+				dev->rc_idx, info->name, rc);
+			break;
+		}
+
+		if (info->out)
+			rc = gpio_direction_output(info->num, info->init);
+		else
+			rc = gpio_direction_input(info->num);
+		if (rc) {
+			PCIE_ERR(dev,
+				"PCIe: RC%d can't set direction for GPIO %s:%d\n",
+				dev->rc_idx, info->name, rc);
+			gpio_free(info->num);
+			break;
+		}
+	}
+
+	if (rc)
+		while (i--)
+			gpio_free(dev->gpio[i].num);
+
+	return rc;
+}
+
+static void msm_pcie_gpio_deinit(struct msm_pcie_dev_t *dev)
+{
+	int i;
+
+	PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
+
+	for (i = 0; i < dev->gpio_n; i++)
+		gpio_free(dev->gpio[i].num);
+}
+
+int msm_pcie_vreg_init(struct msm_pcie_dev_t *dev)
+{
+	int i, rc = 0;
+	struct regulator *vreg;
+	struct msm_pcie_vreg_info_t *info;
+
+	PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);
+
+	for (i = 0; i < MSM_PCIE_MAX_VREG; i++) {
+		info = &dev->vreg[i];
+		vreg = info->hdl;
+
+		if (!vreg)
+			continue;
+
+		PCIE_DBG2(dev, "RC%d Vreg %s is being enabled\n",
+			dev->rc_idx, info->name);
+		if (info->max_v) {
+			rc = regulator_set_voltage(vreg,
+						   info->min_v, info->max_v);
+			if (rc) {
+				PCIE_ERR(dev,
+					"PCIe: RC%d can't set voltage for %s: %d\n",
+					dev->rc_idx, info->name, rc);
+				break;
+			}
+		}
+
+		if (info->opt_mode) {
+			rc = regulator_set_load(vreg, info->opt_mode);
+			if (rc < 0) {
+				PCIE_ERR(dev,
+					"PCIe: RC%d can't set mode for %s: %d\n",
+					dev->rc_idx, info->name, rc);
+				break;
+			}
+		}
+
+		rc = regulator_enable(vreg);
+		if (rc) {
+			PCIE_ERR(dev,
+				"PCIe: RC%d can't enable regulator %s: %d\n",
+				dev->rc_idx, info->name, rc);
+			break;
+		}
+	}
+
+	if (rc)
+		while (i--) {
+			struct regulator *hdl = dev->vreg[i].hdl;
+
+			if (hdl) {
+				regulator_disable(hdl);
+				if (!strcmp(dev->vreg[i].name, "vreg-cx")) {
+					PCIE_DBG(dev,
+						"RC%d: Removing %s vote.\n",
+						dev->rc_idx,
+						dev->vreg[i].name);
+					regulator_set_voltage(hdl,
+						RPM_REGULATOR_CORNER_NONE,
+						INT_MAX);
+				}
+			}
+
+		}
+
+	PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);
+
+	return rc;
+}
+
+static void msm_pcie_vreg_deinit(struct msm_pcie_dev_t *dev)
+{
+	int i;
+
+	PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);
+
+	for (i = MSM_PCIE_MAX_VREG - 1; i >= 0; i--) {
+		if (dev->vreg[i].hdl) {
+			PCIE_DBG(dev, "Vreg %s is being disabled\n",
+				dev->vreg[i].name);
+			regulator_disable(dev->vreg[i].hdl);
+
+			if (!strcmp(dev->vreg[i].name, "vreg-cx")) {
+				PCIE_DBG(dev,
+					"RC%d: Removing %s vote.\n",
+					dev->rc_idx,
+					dev->vreg[i].name);
+				regulator_set_voltage(dev->vreg[i].hdl,
+					RPM_REGULATOR_CORNER_NONE,
+					INT_MAX);
+			}
+		}
+	}
+
+	PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);
+}
+
+static int msm_pcie_clk_init(struct msm_pcie_dev_t *dev)
+{
+	int i, rc = 0;
+	struct msm_pcie_clk_info_t *info;
+	struct msm_pcie_reset_info_t *reset_info;
+
+	PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);
+
+	rc = regulator_enable(dev->gdsc);
+
+	if (rc) {
+		PCIE_ERR(dev, "PCIe: fail to enable GDSC for RC%d (%s)\n",
+			dev->rc_idx, dev->pdev->name);
+		return rc;
+	}
+
+	if (dev->gdsc_smmu) {
+		rc = regulator_enable(dev->gdsc_smmu);
+
+		if (rc) {
+			PCIE_ERR(dev,
+				"PCIe: fail to enable SMMU GDSC for RC%d (%s)\n",
+				dev->rc_idx, dev->pdev->name);
+			return rc;
+		}
+	}
+
+	PCIE_DBG(dev, "PCIe: requesting bus vote for RC%d\n", dev->rc_idx);
+	if (dev->bus_client) {
+		rc = msm_bus_scale_client_update_request(dev->bus_client, 1);
+		if (rc) {
+			PCIE_ERR(dev,
+				"PCIe: fail to set bus bandwidth for RC%d:%d.\n",
+				dev->rc_idx, rc);
+			return rc;
+		}
+
+		PCIE_DBG2(dev,
+			"PCIe: set bus bandwidth for RC%d.\n",
+			dev->rc_idx);
+	}
+
+	for (i = 0; i < MSM_PCIE_MAX_CLK; i++) {
+		info = &dev->clk[i];
+
+		if (!info->hdl)
+			continue;
+
+		if (info->config_mem)
+			msm_pcie_config_clock_mem(dev, info);
+
+		if (info->freq) {
+			rc = clk_set_rate(info->hdl, info->freq);
+			if (rc) {
+				PCIE_ERR(dev,
+					"PCIe: RC%d can't set rate for clk %s: %d.\n",
+					dev->rc_idx, info->name, rc);
+				break;
+			}
+
+			PCIE_DBG2(dev,
+				"PCIe: RC%d set rate for clk %s.\n",
+				dev->rc_idx, info->name);
+		}
+
+		rc = clk_prepare_enable(info->hdl);
+
+		if (rc)
+			PCIE_ERR(dev, "PCIe: RC%d failed to enable clk %s\n",
+				dev->rc_idx, info->name);
+		else
+			PCIE_DBG2(dev, "enable clk %s for RC%d.\n",
+				info->name, dev->rc_idx);
+	}
+
+	if (rc) {
+		PCIE_DBG(dev, "RC%d disable clocks for error handling.\n",
+			dev->rc_idx);
+		while (i--) {
+			struct clk *hdl = dev->clk[i].hdl;
+
+			if (hdl)
+				clk_disable_unprepare(hdl);
+		}
+
+		if (dev->gdsc_smmu)
+			regulator_disable(dev->gdsc_smmu);
+
+		regulator_disable(dev->gdsc);
+	}
+
+	for (i = 0; i < MSM_PCIE_MAX_RESET; i++) {
+		reset_info = &dev->reset[i];
+		if (reset_info->hdl) {
+			rc = reset_control_deassert(reset_info->hdl);
+			if (rc)
+				PCIE_ERR(dev,
+					"PCIe: RC%d failed to deassert reset for %s.\n",
+					dev->rc_idx, reset_info->name);
+			else
+				PCIE_DBG2(dev,
+					"PCIe: RC%d successfully deasserted reset for %s.\n",
+					dev->rc_idx, reset_info->name);
+		}
+	}
+
+	PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);
+
+	return rc;
+}
+
+static void msm_pcie_clk_deinit(struct msm_pcie_dev_t *dev)
+{
+	int i;
+	int rc;
+
+	PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);
+
+	for (i = 0; i < MSM_PCIE_MAX_CLK; i++)
+		if (dev->clk[i].hdl)
+			clk_disable_unprepare(dev->clk[i].hdl);
+
+	if (dev->bus_client) {
+		PCIE_DBG(dev, "PCIe: removing bus vote for RC%d\n",
+			dev->rc_idx);
+
+		rc = msm_bus_scale_client_update_request(dev->bus_client, 0);
+		if (rc)
+			PCIE_ERR(dev,
+				"PCIe: fail to relinquish bus bandwidth for RC%d:%d.\n",
+				dev->rc_idx, rc);
+		else
+			PCIE_DBG(dev,
+				"PCIe: relinquish bus bandwidth for RC%d.\n",
+				dev->rc_idx);
+	}
+
+	if (dev->gdsc_smmu)
+		regulator_disable(dev->gdsc_smmu);
+
+	regulator_disable(dev->gdsc);
+
+	PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);
+}
+
+static int msm_pcie_pipe_clk_init(struct msm_pcie_dev_t *dev)
+{
+	int i, rc = 0;
+	struct msm_pcie_clk_info_t *info;
+	struct msm_pcie_reset_info_t *pipe_reset_info;
+
+	PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);
+
+	for (i = 0; i < MSM_PCIE_MAX_PIPE_CLK; i++) {
+		info = &dev->pipeclk[i];
+
+		if (!info->hdl)
+			continue;
+
+		if (info->config_mem)
+			msm_pcie_config_clock_mem(dev, info);
+
+		if (info->freq) {
+			rc = clk_set_rate(info->hdl, info->freq);
+			if (rc) {
+				PCIE_ERR(dev,
+					"PCIe: RC%d can't set rate for clk %s: %d.\n",
+					dev->rc_idx, info->name, rc);
+				break;
+			}
+
+			PCIE_DBG2(dev,
+				"PCIe: RC%d set rate for clk %s.\n",
+				dev->rc_idx, info->name);
+		}
+
+		rc = clk_prepare_enable(info->hdl);
+
+		if (rc)
+			PCIE_ERR(dev, "PCIe: RC%d failed to enable clk %s.\n",
+				dev->rc_idx, info->name);
+		else
+			PCIE_DBG2(dev, "RC%d enabled pipe clk %s.\n",
+				dev->rc_idx, info->name);
+	}
+
+	if (rc) {
+		PCIE_DBG(dev, "RC%d disable pipe clocks for error handling.\n",
+			dev->rc_idx);
+		while (i--)
+			if (dev->pipeclk[i].hdl)
+				clk_disable_unprepare(dev->pipeclk[i].hdl);
+	}
+
+	for (i = 0; i < MSM_PCIE_MAX_PIPE_RESET; i++) {
+		pipe_reset_info = &dev->pipe_reset[i];
+		if (pipe_reset_info->hdl) {
+			rc = reset_control_deassert(
+					pipe_reset_info->hdl);
+			if (rc)
+				PCIE_ERR(dev,
+					"PCIe: RC%d failed to deassert pipe reset for %s.\n",
+					dev->rc_idx, pipe_reset_info->name);
+			else
+				PCIE_DBG2(dev,
+					"PCIe: RC%d successfully deasserted pipe reset for %s.\n",
+					dev->rc_idx, pipe_reset_info->name);
+		}
+	}
+
+	PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);
+
+	return rc;
+}
+
+static void msm_pcie_pipe_clk_deinit(struct msm_pcie_dev_t *dev)
+{
+	int i;
+
+	PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);
+
+	for (i = 0; i < MSM_PCIE_MAX_PIPE_CLK; i++)
+		if (dev->pipeclk[i].hdl)
+			clk_disable_unprepare(
+				dev->pipeclk[i].hdl);
+
+	PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);
+}
+
+static void msm_pcie_iatu_config_all_ep(struct msm_pcie_dev_t *dev)
+{
+	int i;
+	u8 type;
+	struct msm_pcie_device_info *dev_table = dev->pcidev_table;
+
+	for (i = 0; i < MAX_DEVICE_NUM; i++) {
+		if (!dev_table[i].bdf)
+			break;
+
+		type = dev_table[i].bdf >> 24 == 0x1 ?
+			PCIE20_CTRL1_TYPE_CFG0 : PCIE20_CTRL1_TYPE_CFG1;
+
+		msm_pcie_iatu_config(dev, i, type, dev_table[i].phy_address,
+			dev_table[i].phy_address + SZ_4K - 1,
+			dev_table[i].bdf);
+	}
+}
+
+static void msm_pcie_config_controller(struct msm_pcie_dev_t *dev)
+{
+	int i;
+
+	PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
+
+	/*
+	 * program and enable address translation region 0 (device config
+	 * address space); region type config;
+	 * axi config address range to device config address range
+	 */
+	if (dev->enumerated) {
+		msm_pcie_iatu_config_all_ep(dev);
+	} else {
+		dev->current_bdf = 0; /* to force IATU re-config */
+		msm_pcie_cfg_bdf(dev, 1, 0);
+	}
+
+	/* configure N_FTS */
+	PCIE_DBG2(dev, "Original PCIE20_ACK_F_ASPM_CTRL_REG:0x%x\n",
+		readl_relaxed(dev->dm_core + PCIE20_ACK_F_ASPM_CTRL_REG));
+	if (!dev->n_fts)
+		msm_pcie_write_mask(dev->dm_core + PCIE20_ACK_F_ASPM_CTRL_REG,
+					0, BIT(15));
+	else
+		msm_pcie_write_mask(dev->dm_core + PCIE20_ACK_F_ASPM_CTRL_REG,
+					PCIE20_ACK_N_FTS,
+					dev->n_fts << 8);
+
+	if (dev->shadow_en)
+		dev->rc_shadow[PCIE20_ACK_F_ASPM_CTRL_REG / 4] =
+			readl_relaxed(dev->dm_core +
+			PCIE20_ACK_F_ASPM_CTRL_REG);
+
+	PCIE_DBG2(dev, "Updated PCIE20_ACK_F_ASPM_CTRL_REG:0x%x\n",
+		readl_relaxed(dev->dm_core + PCIE20_ACK_F_ASPM_CTRL_REG));
+
+	/* configure AUX clock frequency register for PCIe core */
+	if (dev->use_19p2mhz_aux_clk)
+		msm_pcie_write_reg(dev->dm_core, PCIE20_AUX_CLK_FREQ_REG, 0x14);
+	else
+		msm_pcie_write_reg(dev->dm_core, PCIE20_AUX_CLK_FREQ_REG, 0x01);
+
+	/* configure the completion timeout value for PCIe core */
+	if (dev->cpl_timeout && dev->bridge_found)
+		msm_pcie_write_reg_field(dev->dm_core,
+					PCIE20_DEVICE_CONTROL2_STATUS2,
+					0xf, dev->cpl_timeout);
+
+	/* Enable AER on RC */
+	if (dev->aer_enable) {
+		msm_pcie_write_mask(dev->dm_core + PCIE20_BRIDGE_CTRL, 0,
+						BIT(16)|BIT(17));
+		msm_pcie_write_mask(dev->dm_core +  PCIE20_CAP_DEVCTRLSTATUS, 0,
+						BIT(3)|BIT(2)|BIT(1)|BIT(0));
+
+		PCIE_DBG(dev, "RC's PCIE20_CAP_DEVCTRLSTATUS:0x%x\n",
+			readl_relaxed(dev->dm_core + PCIE20_CAP_DEVCTRLSTATUS));
+	}
+
+	/* configure SMMU registers */
+	if (dev->smmu_exist) {
+		msm_pcie_write_reg(dev->parf,
+			PCIE20_PARF_BDF_TRANSLATE_CFG, 0);
+		msm_pcie_write_reg(dev->parf,
+			PCIE20_PARF_SID_OFFSET, 0);
+
+		if (dev->enumerated) {
+			for (i = 0; i < MAX_DEVICE_NUM; i++) {
+				if (dev->pcidev_table[i].dev &&
+					dev->pcidev_table[i].short_bdf) {
+					msm_pcie_write_reg(dev->parf,
+						PCIE20_PARF_BDF_TRANSLATE_N +
+						dev->pcidev_table[i].short_bdf
+						* 4,
+						dev->pcidev_table[i].bdf >> 16);
+				}
+			}
+		}
+	}
+}
+
+static void msm_pcie_config_link_state(struct msm_pcie_dev_t *dev)
+{
+	u32 val;
+	u32 current_offset;
+	u32 ep_l1sub_ctrl1_offset = 0;
+	u32 ep_l1sub_cap_reg1_offset = 0;
+	u32 ep_link_cap_offset = 0;
+	u32 ep_link_ctrlstts_offset = 0;
+	u32 ep_dev_ctrl2stts2_offset = 0;
+
+	/* Enable the AUX Clock and the Core Clk to be synchronous for L1SS */
+	if (!dev->aux_clk_sync && dev->l1ss_supported)
+		msm_pcie_write_mask(dev->parf +
+				PCIE20_PARF_SYS_CTRL, BIT(3), 0);
+
+	current_offset = readl_relaxed(dev->conf + PCIE_CAP_PTR_OFFSET) & 0xff;
+
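+	/*
+	 * Walk the standard capability list: each capability header holds
+	 * the ID in bits 7:0 and the next pointer in bits 15:8, and the
+	 * list is terminated by a zero pointer.
+	 */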
+	while (current_offset) {
+		if (msm_pcie_check_align(dev, current_offset))
+			return;
+
+		val = readl_relaxed(dev->conf + current_offset);
+		if ((val & 0xff) == PCIE20_CAP_ID) {
+			ep_link_cap_offset = current_offset + 0x0c;
+			ep_link_ctrlstts_offset = current_offset + 0x10;
+			ep_dev_ctrl2stts2_offset = current_offset + 0x28;
+			break;
+		}
+		current_offset = (val >> 8) & 0xff;
+	}
+
+	if (!ep_link_cap_offset) {
+		PCIE_DBG(dev,
+			"RC%d endpoint does not support PCIe capability registers\n",
+			dev->rc_idx);
+		return;
+	}
+
+	PCIE_DBG(dev,
+		"RC%d: ep_link_cap_offset: 0x%x\n",
+		dev->rc_idx, ep_link_cap_offset);
+
+	if (dev->common_clk_en) {
+		msm_pcie_write_mask(dev->dm_core + PCIE20_CAP_LINKCTRLSTATUS,
+					0, BIT(6));
+
+		msm_pcie_write_mask(dev->conf + ep_link_ctrlstts_offset,
+					0, BIT(6));
+
+		if (dev->shadow_en) {
+			dev->rc_shadow[PCIE20_CAP_LINKCTRLSTATUS / 4] =
+				readl_relaxed(dev->dm_core +
+					PCIE20_CAP_LINKCTRLSTATUS);
+
+			dev->ep_shadow[0][ep_link_ctrlstts_offset / 4] =
+				readl_relaxed(dev->conf +
+					ep_link_ctrlstts_offset);
+		}
+
+		PCIE_DBG2(dev, "RC's CAP_LINKCTRLSTATUS:0x%x\n",
+			readl_relaxed(dev->dm_core +
+			PCIE20_CAP_LINKCTRLSTATUS));
+		PCIE_DBG2(dev, "EP's CAP_LINKCTRLSTATUS:0x%x\n",
+			readl_relaxed(dev->conf + ep_link_ctrlstts_offset));
+	}
+
+	if (dev->clk_power_manage_en) {
+		val = readl_relaxed(dev->conf + ep_link_cap_offset);
+		if (val & BIT(18)) {
+			msm_pcie_write_mask(dev->conf + ep_link_ctrlstts_offset,
+						0, BIT(8));
+
+			if (dev->shadow_en)
+				dev->ep_shadow[0][ep_link_ctrlstts_offset / 4] =
+					readl_relaxed(dev->conf +
+						ep_link_ctrlstts_offset);
+
+			PCIE_DBG2(dev, "EP's CAP_LINKCTRLSTATUS:0x%x\n",
+				readl_relaxed(dev->conf +
+					ep_link_ctrlstts_offset));
+		}
+	}
+
+	if (dev->l0s_supported) {
+		msm_pcie_write_mask(dev->dm_core + PCIE20_CAP_LINKCTRLSTATUS,
+					0, BIT(0));
+		msm_pcie_write_mask(dev->conf + ep_link_ctrlstts_offset,
+					0, BIT(0));
+		if (dev->shadow_en) {
+			dev->rc_shadow[PCIE20_CAP_LINKCTRLSTATUS / 4] =
+						readl_relaxed(dev->dm_core +
+						PCIE20_CAP_LINKCTRLSTATUS);
+			dev->ep_shadow[0][ep_link_ctrlstts_offset / 4] =
+						readl_relaxed(dev->conf +
+						ep_link_ctrlstts_offset);
+		}
+		PCIE_DBG2(dev, "RC's CAP_LINKCTRLSTATUS:0x%x\n",
+			readl_relaxed(dev->dm_core +
+			PCIE20_CAP_LINKCTRLSTATUS));
+		PCIE_DBG2(dev, "EP's CAP_LINKCTRLSTATUS:0x%x\n",
+			readl_relaxed(dev->conf + ep_link_ctrlstts_offset));
+	}
+
+	if (dev->l1_supported) {
+		msm_pcie_write_mask(dev->dm_core + PCIE20_CAP_LINKCTRLSTATUS,
+					0, BIT(1));
+		msm_pcie_write_mask(dev->conf + ep_link_ctrlstts_offset,
+					0, BIT(1));
+		if (dev->shadow_en) {
+			dev->rc_shadow[PCIE20_CAP_LINKCTRLSTATUS / 4] =
+						readl_relaxed(dev->dm_core +
+						PCIE20_CAP_LINKCTRLSTATUS);
+			dev->ep_shadow[0][ep_link_ctrlstts_offset / 4] =
+						readl_relaxed(dev->conf +
+						ep_link_ctrlstts_offset);
+		}
+		PCIE_DBG2(dev, "RC's CAP_LINKCTRLSTATUS:0x%x\n",
+			readl_relaxed(dev->dm_core +
+			PCIE20_CAP_LINKCTRLSTATUS));
+		PCIE_DBG2(dev, "EP's CAP_LINKCTRLSTATUS:0x%x\n",
+			readl_relaxed(dev->conf + ep_link_ctrlstts_offset));
+	}
+
+	if (dev->l1ss_supported) {
+		current_offset = PCIE_EXT_CAP_OFFSET;
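+		/*
+		 * Walk the extended capability list: bits 15:0 of each
+		 * header hold the capability ID and bits 31:20 the next
+		 * offset, terminated by a zero offset.
+		 */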
+		while (current_offset) {
+			if (msm_pcie_check_align(dev, current_offset))
+				return;
+
+			val = readl_relaxed(dev->conf + current_offset);
+			if ((val & 0xffff) == L1SUB_CAP_ID) {
+				ep_l1sub_cap_reg1_offset = current_offset + 0x4;
+				ep_l1sub_ctrl1_offset = current_offset + 0x8;
+				break;
+			}
+			current_offset = val >> 20;
+		}
+		if (!ep_l1sub_ctrl1_offset) {
+			PCIE_DBG(dev,
+				"RC%d endpoint does not support l1ss registers\n",
+				dev->rc_idx);
+			return;
+		}
+
+		val = readl_relaxed(dev->conf + ep_l1sub_cap_reg1_offset);
+
+		PCIE_DBG2(dev, "EP's L1SUB_CAPABILITY_REG_1: 0x%x\n", val);
+		PCIE_DBG2(dev, "RC%d: ep_l1sub_ctrl1_offset: 0x%x\n",
+				dev->rc_idx, ep_l1sub_ctrl1_offset);
+
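+		/*
+		 * Keep only the low four capability bits (L1.2/L1.1 support
+		 * for PCI-PM and ASPM in the L1 PM substates capability) and
+		 * program them into both the RC's and the EP's CONTROL1.
+		 */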
+		val &= 0xf;
+
+		msm_pcie_write_reg_field(dev->dm_core, PCIE20_L1SUB_CONTROL1,
+					0xf, val);
+		msm_pcie_write_mask(dev->dm_core +
+					PCIE20_DEVICE_CONTROL2_STATUS2,
+					0, BIT(10));
+		msm_pcie_write_reg_field(dev->conf, ep_l1sub_ctrl1_offset,
+					0xf, val);
+		msm_pcie_write_mask(dev->conf + ep_dev_ctrl2stts2_offset,
+					0, BIT(10));
+		if (dev->shadow_en) {
+			dev->rc_shadow[PCIE20_L1SUB_CONTROL1 / 4] =
+					readl_relaxed(dev->dm_core +
+					PCIE20_L1SUB_CONTROL1);
+			dev->rc_shadow[PCIE20_DEVICE_CONTROL2_STATUS2 / 4] =
+					readl_relaxed(dev->dm_core +
+					PCIE20_DEVICE_CONTROL2_STATUS2);
+			dev->ep_shadow[0][ep_l1sub_ctrl1_offset / 4] =
+					readl_relaxed(dev->conf +
+					ep_l1sub_ctrl1_offset);
+			dev->ep_shadow[0][ep_dev_ctrl2stts2_offset / 4] =
+					readl_relaxed(dev->conf +
+					ep_dev_ctrl2stts2_offset);
+		}
+		PCIE_DBG2(dev, "RC's L1SUB_CONTROL1:0x%x\n",
+			readl_relaxed(dev->dm_core + PCIE20_L1SUB_CONTROL1));
+		PCIE_DBG2(dev, "RC's DEVICE_CONTROL2_STATUS2:0x%x\n",
+			readl_relaxed(dev->dm_core +
+			PCIE20_DEVICE_CONTROL2_STATUS2));
+		PCIE_DBG2(dev, "EP's L1SUB_CONTROL1:0x%x\n",
+			readl_relaxed(dev->conf + ep_l1sub_ctrl1_offset));
+		PCIE_DBG2(dev, "EP's DEVICE_CONTROL2_STATUS2:0x%x\n",
+			readl_relaxed(dev->conf +
+			ep_dev_ctrl2stts2_offset));
+	}
+}
+
+void msm_pcie_config_msi_controller(struct msm_pcie_dev_t *dev)
+{
+	int i;
+
+	PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
+
+	/* program MSI controller and enable all interrupts */
+	writel_relaxed(MSM_PCIE_MSI_PHY, dev->dm_core + PCIE20_MSI_CTRL_ADDR);
+	writel_relaxed(0, dev->dm_core + PCIE20_MSI_CTRL_UPPER_ADDR);
+
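+	/*
+	 * The per-bank interrupt-enable registers are spaced 12 bytes
+	 * apart; the grouping into enable/mask/status triplets per bank is
+	 * an assumption, only the spacing itself is visible here.
+	 */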
+	for (i = 0; i < PCIE20_MSI_CTRL_MAX; i++)
+		writel_relaxed(~0, dev->dm_core +
+			       PCIE20_MSI_CTRL_INTR_EN + (i * 12));
+
+	/* ensure that hardware is configured before proceeding */
+	wmb();
+}
+
+static int msm_pcie_get_resources(struct msm_pcie_dev_t *dev,
+					struct platform_device *pdev)
+{
+	int i, len, cnt, ret = 0, size = 0;
+	struct msm_pcie_vreg_info_t *vreg_info;
+	struct msm_pcie_gpio_info_t *gpio_info;
+	struct msm_pcie_clk_info_t  *clk_info;
+	struct resource *res;
+	struct msm_pcie_res_info_t *res_info;
+	struct msm_pcie_irq_info_t *irq_info;
+	struct msm_pcie_irq_info_t *msi_info;
+	struct msm_pcie_reset_info_t *reset_info;
+	struct msm_pcie_reset_info_t *pipe_reset_info;
+	char prop_name[MAX_PROP_SIZE];
+	const __be32 *prop;
+	u32 *clkfreq = NULL;
+
+	PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);
+
+	cnt = of_property_count_strings((&pdev->dev)->of_node,
+			"clock-names");
+	if (cnt > 0) {
+		clkfreq = kzalloc((MSM_PCIE_MAX_CLK + MSM_PCIE_MAX_PIPE_CLK) *
+					sizeof(*clkfreq), GFP_KERNEL);
+		if (!clkfreq) {
+			PCIE_ERR(dev, "PCIe: memory alloc failed for RC%d\n",
+					dev->rc_idx);
+			return -ENOMEM;
+		}
+		ret = of_property_read_u32_array(
+			(&pdev->dev)->of_node,
+			"max-clock-frequency-hz", clkfreq, cnt);
+		if (ret) {
+			PCIE_ERR(dev,
+				"PCIe: invalid max-clock-frequency-hz property for RC%d:%d\n",
+				dev->rc_idx, ret);
+			goto out;
+		}
+	}
+
+	for (i = 0; i < MSM_PCIE_MAX_VREG; i++) {
+		vreg_info = &dev->vreg[i];
+		vreg_info->hdl =
+				devm_regulator_get(&pdev->dev, vreg_info->name);
+
+		if (PTR_ERR(vreg_info->hdl) == -EPROBE_DEFER) {
+			PCIE_DBG(dev, "EPROBE_DEFER for VReg:%s\n",
+				vreg_info->name);
+			ret = PTR_ERR(vreg_info->hdl);
+			goto out;
+		}
+
+		if (IS_ERR(vreg_info->hdl)) {
+			if (vreg_info->required) {
+				PCIE_DBG(dev, "Vreg %s doesn't exist\n",
+					vreg_info->name);
+				ret = PTR_ERR(vreg_info->hdl);
+				goto out;
+			} else {
+				PCIE_DBG(dev,
+					"Optional Vreg %s doesn't exist\n",
+					vreg_info->name);
+				vreg_info->hdl = NULL;
+			}
+		} else {
+			dev->vreg_n++;
+			snprintf(prop_name, MAX_PROP_SIZE,
+				"qcom,%s-voltage-level", vreg_info->name);
+			prop = of_get_property((&pdev->dev)->of_node,
+						prop_name, &len);
+			if (!prop || (len != (3 * sizeof(__be32)))) {
+				PCIE_DBG(dev, "%s %s property\n",
+					prop ? "invalid format" :
+					"no", prop_name);
+			} else {
+				vreg_info->max_v = be32_to_cpup(&prop[0]);
+				vreg_info->min_v = be32_to_cpup(&prop[1]);
+				vreg_info->opt_mode =
+					be32_to_cpup(&prop[2]);
+			}
+		}
+	}
+
+	dev->gdsc = devm_regulator_get(&pdev->dev, "gdsc-vdd");
+
+	if (IS_ERR(dev->gdsc)) {
+		PCIE_ERR(dev, "PCIe: RC%d Failed to get %s GDSC:%ld\n",
+			dev->rc_idx, dev->pdev->name, PTR_ERR(dev->gdsc));
+		if (PTR_ERR(dev->gdsc) == -EPROBE_DEFER)
+			PCIE_DBG(dev, "PCIe: EPROBE_DEFER for %s GDSC\n",
+					dev->pdev->name);
+		ret = PTR_ERR(dev->gdsc);
+		goto out;
+	}
+
+	dev->gdsc_smmu = devm_regulator_get(&pdev->dev, "gdsc-smmu");
+
+	if (IS_ERR(dev->gdsc_smmu)) {
+		PCIE_DBG(dev, "PCIe: RC%d SMMU GDSC does not exist",
+			dev->rc_idx);
+		dev->gdsc_smmu = NULL;
+	}
+
+	dev->gpio_n = 0;
+	for (i = 0; i < MSM_PCIE_MAX_GPIO; i++) {
+		gpio_info = &dev->gpio[i];
+		ret = of_get_named_gpio((&pdev->dev)->of_node,
+					gpio_info->name, 0);
+		if (ret >= 0) {
+			gpio_info->num = ret;
+			dev->gpio_n++;
+			PCIE_DBG(dev, "GPIO num for %s is %d\n",
+				gpio_info->name, gpio_info->num);
+		} else {
+			if (gpio_info->required) {
+				PCIE_ERR(dev,
+					"Could not get required GPIO %s\n",
+					gpio_info->name);
+				goto out;
+			} else {
+				PCIE_DBG(dev,
+					"Could not get optional GPIO %s\n",
+					gpio_info->name);
+			}
+		}
+		ret = 0;
+	}
+
+	of_get_property(pdev->dev.of_node, "qcom,phy-sequence", &size);
+	if (size) {
+		dev->phy_sequence = (struct msm_pcie_phy_info_t *)
+			devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+
+		if (dev->phy_sequence) {
+			dev->phy_len =
+				size / sizeof(*dev->phy_sequence);
+
+			of_property_read_u32_array(pdev->dev.of_node,
+				"qcom,phy-sequence",
+				(unsigned int *)dev->phy_sequence,
+				size / sizeof(dev->phy_sequence->offset));
+		} else {
+			PCIE_ERR(dev,
+				"RC%d: Could not allocate memory for phy init sequence.\n",
+				dev->rc_idx);
+			ret = -ENOMEM;
+			goto out;
+		}
+	} else {
+		PCIE_DBG(dev, "RC%d: phy sequence is not present in DT\n",
+			dev->rc_idx);
+	}
+
+	of_get_property(pdev->dev.of_node, "qcom,port-phy-sequence", &size);
+	if (size) {
+		dev->port_phy_sequence = (struct msm_pcie_phy_info_t *)
+			devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+
+		if (dev->port_phy_sequence) {
+			dev->port_phy_len =
+				size / sizeof(*dev->port_phy_sequence);
+
+			of_property_read_u32_array(pdev->dev.of_node,
+				"qcom,port-phy-sequence",
+				(unsigned int *)dev->port_phy_sequence,
+				size / sizeof(dev->port_phy_sequence->offset));
+		} else {
+			PCIE_ERR(dev,
+				"RC%d: Could not allocate memory for port phy init sequence.\n",
+				dev->rc_idx);
+			ret = -ENOMEM;
+			goto out;
+		}
+	} else {
+		PCIE_DBG(dev, "RC%d: port phy sequence is not present in DT\n",
+			dev->rc_idx);
+	}
+
+	for (i = 0; i < MSM_PCIE_MAX_CLK; i++) {
+		clk_info = &dev->clk[i];
+
+		clk_info->hdl = devm_clk_get(&pdev->dev, clk_info->name);
+
+		if (IS_ERR(clk_info->hdl)) {
+			if (clk_info->required) {
+				PCIE_DBG(dev, "Clock %s isn't available:%ld\n",
+				clk_info->name, PTR_ERR(clk_info->hdl));
+				ret = PTR_ERR(clk_info->hdl);
+				goto out;
+			} else {
+				PCIE_DBG(dev, "Ignoring Clock %s\n",
+					clk_info->name);
+				clk_info->hdl = NULL;
+			}
+		} else {
+			if (clkfreq != NULL) {
+				clk_info->freq = clkfreq[i +
+					MSM_PCIE_MAX_PIPE_CLK];
+				PCIE_DBG(dev, "Freq of Clock %s is:%d\n",
+					clk_info->name, clk_info->freq);
+			}
+		}
+	}
+
+	for (i = 0; i < MSM_PCIE_MAX_PIPE_CLK; i++) {
+		clk_info = &dev->pipeclk[i];
+
+		clk_info->hdl = devm_clk_get(&pdev->dev, clk_info->name);
+
+		if (IS_ERR(clk_info->hdl)) {
+			if (clk_info->required) {
+				PCIE_DBG(dev, "Clock %s isn't available:%ld\n",
+				clk_info->name, PTR_ERR(clk_info->hdl));
+				ret = PTR_ERR(clk_info->hdl);
+				goto out;
+			} else {
+				PCIE_DBG(dev, "Ignoring Clock %s\n",
+					clk_info->name);
+				clk_info->hdl = NULL;
+			}
+		} else {
+			if (clkfreq != NULL) {
+				clk_info->freq = clkfreq[i];
+				PCIE_DBG(dev, "Freq of Clock %s is:%d\n",
+					clk_info->name, clk_info->freq);
+			}
+		}
+	}
+
+	for (i = 0; i < MSM_PCIE_MAX_RESET; i++) {
+		reset_info = &dev->reset[i];
+
+		reset_info->hdl = devm_reset_control_get(&pdev->dev,
+						reset_info->name);
+
+		if (IS_ERR(reset_info->hdl)) {
+			if (reset_info->required) {
+				PCIE_DBG(dev,
+					"Reset %s isn't available:%ld\n",
+					reset_info->name,
+					PTR_ERR(reset_info->hdl));
+
+				ret = PTR_ERR(reset_info->hdl);
+				reset_info->hdl = NULL;
+				goto out;
+			} else {
+				PCIE_DBG(dev, "Ignoring Reset %s\n",
+					reset_info->name);
+				reset_info->hdl = NULL;
+			}
+		}
+	}
+
+	for (i = 0; i < MSM_PCIE_MAX_PIPE_RESET; i++) {
+		pipe_reset_info = &dev->pipe_reset[i];
+
+		pipe_reset_info->hdl = devm_reset_control_get(&pdev->dev,
+						pipe_reset_info->name);
+
+		if (IS_ERR(pipe_reset_info->hdl)) {
+			if (pipe_reset_info->required) {
+				PCIE_DBG(dev,
+					"Pipe Reset %s isn't available:%ld\n",
+					pipe_reset_info->name,
+					PTR_ERR(pipe_reset_info->hdl));
+
+				ret = PTR_ERR(pipe_reset_info->hdl);
+				pipe_reset_info->hdl = NULL;
+				goto out;
+			} else {
+				PCIE_DBG(dev, "Ignoring Pipe Reset %s\n",
+					pipe_reset_info->name);
+				pipe_reset_info->hdl = NULL;
+			}
+		}
+	}
+
+	dev->bus_scale_table = msm_bus_cl_get_pdata(pdev);
+	if (!dev->bus_scale_table) {
+		PCIE_DBG(dev, "PCIe: No bus scale table for RC%d (%s)\n",
+			dev->rc_idx, dev->pdev->name);
+		dev->bus_client = 0;
+	} else {
+		dev->bus_client =
+			msm_bus_scale_register_client(dev->bus_scale_table);
+		if (!dev->bus_client) {
+			PCIE_ERR(dev,
+				"PCIe: Failed to register bus client for RC%d (%s)\n",
+				dev->rc_idx, dev->pdev->name);
+			msm_bus_cl_clear_pdata(dev->bus_scale_table);
+			ret = -ENODEV;
+			goto out;
+		}
+	}
+
+	for (i = 0; i < MSM_PCIE_MAX_RES; i++) {
+		res_info = &dev->res[i];
+
+		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+							   res_info->name);
+
+		if (!res) {
+			PCIE_ERR(dev, "PCIe: RC%d can't get %s resource.\n",
+				dev->rc_idx, res_info->name);
+		} else {
+			PCIE_DBG(dev, "start addr for %s is %pa.\n",
+				res_info->name,	&res->start);
+
+			res_info->base = devm_ioremap(&pdev->dev,
+						res->start, resource_size(res));
+			if (!res_info->base) {
+				PCIE_ERR(dev, "PCIe: RC%d can't remap %s.\n",
+					dev->rc_idx, res_info->name);
+				ret = -ENOMEM;
+				goto out;
+			} else {
+				res_info->resource = res;
+			}
+		}
+	}
+
+	for (i = 0; i < MSM_PCIE_MAX_IRQ; i++) {
+		irq_info = &dev->irq[i];
+
+		res = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
+							   irq_info->name);
+
+		if (!res) {
+			PCIE_DBG(dev, "PCIe: RC%d can't find IRQ # for %s.\n",
+				dev->rc_idx, irq_info->name);
+		} else {
+			irq_info->num = res->start;
+			PCIE_DBG(dev, "IRQ # for %s is %d.\n", irq_info->name,
+					irq_info->num);
+		}
+	}
+
+	for (i = 0; i < MSM_PCIE_MAX_MSI; i++) {
+		msi_info = &dev->msi[i];
+
+		res = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
+							   msi_info->name);
+
+		if (!res) {
+			PCIE_DBG(dev, "PCIe: RC%d can't find IRQ # for %s.\n",
+				dev->rc_idx, msi_info->name);
+		} else {
+			msi_info->num = res->start;
+			PCIE_DBG(dev, "IRQ # for %s is %d.\n", msi_info->name,
+					msi_info->num);
+		}
+	}
+
+	/* All allocations succeeded */
+
+	if (dev->gpio[MSM_PCIE_GPIO_WAKE].num)
+		dev->wake_n = gpio_to_irq(dev->gpio[MSM_PCIE_GPIO_WAKE].num);
+	else
+		dev->wake_n = 0;
+
+	dev->parf = dev->res[MSM_PCIE_RES_PARF].base;
+	dev->phy = dev->res[MSM_PCIE_RES_PHY].base;
+	dev->elbi = dev->res[MSM_PCIE_RES_ELBI].base;
+	dev->dm_core = dev->res[MSM_PCIE_RES_DM_CORE].base;
+	dev->conf = dev->res[MSM_PCIE_RES_CONF].base;
+	dev->bars = dev->res[MSM_PCIE_RES_BARS].base;
+	dev->tcsr = dev->res[MSM_PCIE_RES_TCSR].base;
+	dev->dev_mem_res = dev->res[MSM_PCIE_RES_BARS].resource;
+	dev->dev_io_res = dev->res[MSM_PCIE_RES_IO].resource;
+	dev->dev_io_res->flags = IORESOURCE_IO;
+
+out:
+	kfree(clkfreq);
+
+	PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);
+
+	return ret;
+}
+
+static void msm_pcie_release_resources(struct msm_pcie_dev_t *dev)
+{
+	dev->parf = NULL;
+	dev->elbi = NULL;
+	dev->dm_core = NULL;
+	dev->conf = NULL;
+	dev->bars = NULL;
+	dev->tcsr = NULL;
+	dev->dev_mem_res = NULL;
+	dev->dev_io_res = NULL;
+}
+
+int msm_pcie_enable(struct msm_pcie_dev_t *dev, u32 options)
+{
+	int ret = 0;
+	uint32_t val;
+	long int retries = 0;
+	int link_check_count = 0;
+
+	PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);
+
+	mutex_lock(&dev->setup_lock);
+
+	if (dev->link_status == MSM_PCIE_LINK_ENABLED) {
+		PCIE_ERR(dev, "PCIe: the link of RC%d is already enabled\n",
+			dev->rc_idx);
+		goto out;
+	}
+
+	/* assert PCIe reset link to keep EP in reset */
+
+	PCIE_INFO(dev, "PCIe: Assert the reset of endpoint of RC%d.\n",
+		dev->rc_idx);
+	gpio_set_value(dev->gpio[MSM_PCIE_GPIO_PERST].num,
+				dev->gpio[MSM_PCIE_GPIO_PERST].on);
+	usleep_range(PERST_PROPAGATION_DELAY_US_MIN,
+				 PERST_PROPAGATION_DELAY_US_MAX);
+
+	/* enable power */
+
+	if (options & PM_VREG) {
+		ret = msm_pcie_vreg_init(dev);
+		if (ret)
+			goto out;
+	}
+
+	/* enable clocks */
+	if (options & PM_CLK) {
+		ret = msm_pcie_clk_init(dev);
+		/* ensure that changes propagated to the hardware */
+		wmb();
+		if (ret)
+			goto clk_fail;
+	}
+
+	if (dev->scm_dev_id) {
+		PCIE_DBG(dev, "RC%d: restoring sec config\n", dev->rc_idx);
+		msm_pcie_restore_sec_config(dev);
+	}
+
+	/* enable PCIe clocks and resets */
+	msm_pcie_write_mask(dev->parf + PCIE20_PARF_PHY_CTRL, BIT(0), 0);
+
+	/* change DBI base address */
+	writel_relaxed(0, dev->parf + PCIE20_PARF_DBI_BASE_ADDR);
+
+	writel_relaxed(0x365E, dev->parf + PCIE20_PARF_SYS_CTRL);
+
+	msm_pcie_write_mask(dev->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL,
+				0, BIT(4));
+
+	/* enable selected IRQ */
+	if (dev->irq[MSM_PCIE_INT_GLOBAL_INT].num) {
+		msm_pcie_write_reg(dev->parf, PCIE20_PARF_INT_ALL_MASK, 0);
+
+		msm_pcie_write_mask(dev->parf + PCIE20_PARF_INT_ALL_MASK, 0,
+					BIT(MSM_PCIE_INT_EVT_LINK_DOWN) |
+					BIT(MSM_PCIE_INT_EVT_AER_LEGACY) |
+					BIT(MSM_PCIE_INT_EVT_AER_ERR) |
+					BIT(MSM_PCIE_INT_EVT_MSI_0) |
+					BIT(MSM_PCIE_INT_EVT_MSI_1) |
+					BIT(MSM_PCIE_INT_EVT_MSI_2) |
+					BIT(MSM_PCIE_INT_EVT_MSI_3) |
+					BIT(MSM_PCIE_INT_EVT_MSI_4) |
+					BIT(MSM_PCIE_INT_EVT_MSI_5) |
+					BIT(MSM_PCIE_INT_EVT_MSI_6) |
+					BIT(MSM_PCIE_INT_EVT_MSI_7));
+
+		PCIE_DBG(dev, "PCIe: RC%d: PCIE20_PARF_INT_ALL_MASK: 0x%x\n",
+			dev->rc_idx,
+			readl_relaxed(dev->parf + PCIE20_PARF_INT_ALL_MASK));
+	}
+
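+	/* size the PARF slave address space to cover the device MEM window */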
+	if (dev->dev_mem_res->end - dev->dev_mem_res->start > SZ_16M)
+		writel_relaxed(SZ_32M, dev->parf +
+			PCIE20_PARF_SLV_ADDR_SPACE_SIZE);
+	else if (dev->dev_mem_res->end - dev->dev_mem_res->start > SZ_8M)
+		writel_relaxed(SZ_16M, dev->parf +
+			PCIE20_PARF_SLV_ADDR_SPACE_SIZE);
+	else
+		writel_relaxed(SZ_8M, dev->parf +
+			PCIE20_PARF_SLV_ADDR_SPACE_SIZE);
+
+	if (dev->use_msi) {
+		PCIE_DBG(dev, "RC%d: enable WR halt.\n", dev->rc_idx);
+		val = dev->wr_halt_size ? dev->wr_halt_size :
+			readl_relaxed(dev->parf +
+				PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
+
+		msm_pcie_write_reg(dev->parf,
+			PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT,
+			BIT(31) | val);
+
+		PCIE_DBG(dev,
+			"RC%d: PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT: 0x%x.\n",
+			dev->rc_idx,
+			readl_relaxed(dev->parf +
+				PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT));
+	}
+
+	mutex_lock(&com_phy_lock);
+	/* init PCIe PHY */
+	if (!num_rc_on)
+		pcie_phy_init(dev);
+
+	num_rc_on++;
+	mutex_unlock(&com_phy_lock);
+
+	if (options & PM_PIPE_CLK) {
+		usleep_range(PHY_STABILIZATION_DELAY_US_MIN,
+					 PHY_STABILIZATION_DELAY_US_MAX);
+		/* Enable the pipe clock */
+		ret = msm_pcie_pipe_clk_init(dev);
+		/* ensure that changes propagated to the hardware */
+		wmb();
+		if (ret)
+			goto link_fail;
+	}
+
+	PCIE_DBG(dev, "RC%d: waiting for phy ready...\n", dev->rc_idx);
+
+	do {
+		if (pcie_phy_is_ready(dev))
+			break;
+		retries++;
+		usleep_range(REFCLK_STABILIZATION_DELAY_US_MIN,
+					 REFCLK_STABILIZATION_DELAY_US_MAX);
+	} while (retries < PHY_READY_TIMEOUT_COUNT);
+
+	PCIE_DBG(dev, "RC%d: number of PHY retries:%ld.\n",
+		dev->rc_idx, retries);
+
+	if (pcie_phy_is_ready(dev))
+		PCIE_INFO(dev, "PCIe RC%d PHY is ready!\n", dev->rc_idx);
+	else {
+		PCIE_ERR(dev, "PCIe PHY RC%d failed to come up!\n",
+			dev->rc_idx);
+		ret = -ENODEV;
+		pcie_phy_dump(dev);
+		goto link_fail;
+	}
+
+	pcie_pcs_port_phy_init(dev);
+
+	if (dev->ep_latency)
+		usleep_range(dev->ep_latency * 1000, dev->ep_latency * 1000);
+
+	if (dev->gpio[MSM_PCIE_GPIO_EP].num)
+		gpio_set_value(dev->gpio[MSM_PCIE_GPIO_EP].num,
+				dev->gpio[MSM_PCIE_GPIO_EP].on);
+
+	/* de-assert PCIe reset link to bring EP out of reset */
+
+	PCIE_INFO(dev, "PCIe: Release the reset of endpoint of RC%d.\n",
+		dev->rc_idx);
+	gpio_set_value(dev->gpio[MSM_PCIE_GPIO_PERST].num,
+				1 - dev->gpio[MSM_PCIE_GPIO_PERST].on);
+	usleep_range(dev->perst_delay_us_min, dev->perst_delay_us_max);
+
+	/* set max tlp read size */
+	msm_pcie_write_reg_field(dev->dm_core, PCIE20_DEVICE_CONTROL_STATUS,
+				0x7000, dev->tlp_rd_size);
+
+	/* enable link training */
+	msm_pcie_write_mask(dev->parf + PCIE20_PARF_LTSSM, 0, BIT(8));
+
+	PCIE_DBG(dev, "%s", "check if link is up\n");
+
+	/* Wait for up to 100ms for the link to come up */
+	do {
+		usleep_range(LINK_UP_TIMEOUT_US_MIN, LINK_UP_TIMEOUT_US_MAX);
+		val =  readl_relaxed(dev->elbi + PCIE20_ELBI_SYS_STTS);
+	} while ((!(val & XMLH_LINK_UP) ||
+		!msm_pcie_confirm_linkup(dev, false, false, NULL))
+		&& (link_check_count++ < LINK_UP_CHECK_MAX_COUNT));
+
+	if ((val & XMLH_LINK_UP) &&
+		msm_pcie_confirm_linkup(dev, false, false, NULL)) {
+		PCIE_DBG(dev, "Link is up after %d checkings\n",
+			link_check_count);
+		PCIE_INFO(dev, "PCIe RC%d link initialized\n", dev->rc_idx);
+	} else {
+		PCIE_INFO(dev, "PCIe: Assert the reset of endpoint of RC%d.\n",
+			dev->rc_idx);
+		gpio_set_value(dev->gpio[MSM_PCIE_GPIO_PERST].num,
+			dev->gpio[MSM_PCIE_GPIO_PERST].on);
+		PCIE_ERR(dev, "PCIe RC%d link initialization failed\n",
+			dev->rc_idx);
+		ret = -1;
+		goto link_fail;
+	}
+
+	msm_pcie_config_controller(dev);
+
+	if (!dev->msi_gicm_addr)
+		msm_pcie_config_msi_controller(dev);
+
+	msm_pcie_config_link_state(dev);
+
+	dev->link_status = MSM_PCIE_LINK_ENABLED;
+	dev->power_on = true;
+	dev->suspending = false;
+	dev->link_turned_on_counter++;
+
+	goto out;
+
+link_fail:
+	if (dev->gpio[MSM_PCIE_GPIO_EP].num)
+		gpio_set_value(dev->gpio[MSM_PCIE_GPIO_EP].num,
+				1 - dev->gpio[MSM_PCIE_GPIO_EP].on);
+	msm_pcie_write_reg(dev->phy,
+		PCIE_N_SW_RESET(dev->rc_idx, dev->common_phy), 0x1);
+	msm_pcie_write_reg(dev->phy,
+		PCIE_N_POWER_DOWN_CONTROL(dev->rc_idx, dev->common_phy), 0);
+
+	mutex_lock(&com_phy_lock);
+	num_rc_on--;
+	if (!num_rc_on && dev->common_phy) {
+		PCIE_DBG(dev, "PCIe: RC%d is powering down the common phy\n",
+			dev->rc_idx);
+		msm_pcie_write_reg(dev->phy, PCIE_COM_SW_RESET, 0x1);
+		msm_pcie_write_reg(dev->phy, PCIE_COM_POWER_DOWN_CONTROL, 0);
+	}
+	mutex_unlock(&com_phy_lock);
+
+	msm_pcie_pipe_clk_deinit(dev);
+	msm_pcie_clk_deinit(dev);
+clk_fail:
+	msm_pcie_vreg_deinit(dev);
+out:
+	mutex_unlock(&dev->setup_lock);
+
+	PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);
+
+	return ret;
+}
+
+void msm_pcie_disable(struct msm_pcie_dev_t *dev, u32 options)
+{
+	PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);
+
+	mutex_lock(&dev->setup_lock);
+
+	if (!dev->power_on) {
+		PCIE_DBG(dev,
+			"PCIe: the link of RC%d is already power down.\n",
+			dev->rc_idx);
+		mutex_unlock(&dev->setup_lock);
+		return;
+	}
+
+	dev->link_status = MSM_PCIE_LINK_DISABLED;
+	dev->power_on = false;
+	dev->link_turned_off_counter++;
+
+	PCIE_INFO(dev, "PCIe: Assert the reset of endpoint of RC%d.\n",
+		dev->rc_idx);
+
+	gpio_set_value(dev->gpio[MSM_PCIE_GPIO_PERST].num,
+				dev->gpio[MSM_PCIE_GPIO_PERST].on);
+
+	msm_pcie_write_reg(dev->phy,
+		PCIE_N_SW_RESET(dev->rc_idx, dev->common_phy), 0x1);
+	msm_pcie_write_reg(dev->phy,
+		PCIE_N_POWER_DOWN_CONTROL(dev->rc_idx, dev->common_phy), 0);
+
+	mutex_lock(&com_phy_lock);
+	num_rc_on--;
+	if (!num_rc_on && dev->common_phy) {
+		PCIE_DBG(dev, "PCIe: RC%d is powering down the common phy\n",
+			dev->rc_idx);
+		msm_pcie_write_reg(dev->phy, PCIE_COM_SW_RESET, 0x1);
+		msm_pcie_write_reg(dev->phy, PCIE_COM_POWER_DOWN_CONTROL, 0);
+	}
+	mutex_unlock(&com_phy_lock);
+
+	if (options & PM_CLK) {
+		msm_pcie_write_mask(dev->parf + PCIE20_PARF_PHY_CTRL, 0,
+					BIT(0));
+		msm_pcie_clk_deinit(dev);
+	}
+
+	if (options & PM_VREG)
+		msm_pcie_vreg_deinit(dev);
+
+	if (options & PM_PIPE_CLK)
+		msm_pcie_pipe_clk_deinit(dev);
+
+	if (dev->gpio[MSM_PCIE_GPIO_EP].num)
+		gpio_set_value(dev->gpio[MSM_PCIE_GPIO_EP].num,
+				1 - dev->gpio[MSM_PCIE_GPIO_EP].on);
+
+	mutex_unlock(&dev->setup_lock);
+
+	PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);
+}
+
+static void msm_pcie_config_ep_aer(struct msm_pcie_dev_t *dev,
+				struct msm_pcie_device_info *ep_dev_info)
+{
+	u32 val;
+	void __iomem *ep_base = ep_dev_info->conf_base;
+	u32 current_offset = readl_relaxed(ep_base + PCIE_CAP_PTR_OFFSET) &
+						0xff;
+
+	while (current_offset) {
+		if (msm_pcie_check_align(dev, current_offset))
+			return;
+
+		val = readl_relaxed(ep_base + current_offset);
+		if ((val & 0xff) == PCIE20_CAP_ID) {
+			ep_dev_info->dev_ctrlstts_offset =
+				current_offset + 0x8;
+			break;
+		}
+		current_offset = (val >> 8) & 0xff;
+	}
+
+	if (!ep_dev_info->dev_ctrlstts_offset) {
+		PCIE_DBG(dev,
+			"RC%d endpoint does not support PCIe cap registers\n",
+			dev->rc_idx);
+		return;
+	}
+
+	PCIE_DBG2(dev, "RC%d: EP dev_ctrlstts_offset: 0x%x\n",
+		dev->rc_idx, ep_dev_info->dev_ctrlstts_offset);
+
+	/* Enable AER on EP */
+	msm_pcie_write_mask(ep_base + ep_dev_info->dev_ctrlstts_offset, 0,
+				BIT(3)|BIT(2)|BIT(1)|BIT(0));
+
+	PCIE_DBG(dev, "EP's PCIE20_CAP_DEVCTRLSTATUS:0x%x\n",
+		readl_relaxed(ep_base + ep_dev_info->dev_ctrlstts_offset));
+}
+
+static int msm_pcie_config_device_table(struct device *dev, void *pdev)
+{
+	struct pci_dev *pcidev = to_pci_dev(dev);
+	struct msm_pcie_dev_t *pcie_dev = (struct msm_pcie_dev_t *) pdev;
+	struct msm_pcie_device_info *dev_table_t = pcie_dev->pcidev_table;
+	struct resource *axi_conf = pcie_dev->res[MSM_PCIE_RES_CONF].resource;
+	int ret = 0;
+	u32 rc_idx = pcie_dev->rc_idx;
+	u32 i, index;
+	u32 bdf = 0;
+	u8 type;
+	u32 h_type;
+	u32 bme;
+
+	if (!pcidev) {
+		PCIE_ERR(pcie_dev,
+			"PCIe: Did not find PCI device in list for RC%d.\n",
+			pcie_dev->rc_idx);
+		return -ENODEV;
+	}
+
+	PCIE_DBG(pcie_dev,
+		"PCI device found: vendor-id:0x%x device-id:0x%x\n",
+		pcidev->vendor, pcidev->device);
+
+	if (!pcidev->bus->number)
+		return ret;
+
+	bdf = BDF_OFFSET(pcidev->bus->number, pcidev->devfn);
+	type = pcidev->bus->number == 1 ?
+		PCIE20_CTRL1_TYPE_CFG0 : PCIE20_CTRL1_TYPE_CFG1;
+
+	for (i = 0; i < (MAX_RC_NUM * MAX_DEVICE_NUM); i++) {
+		if (msm_pcie_dev_tbl[i].bdf == bdf &&
+			!msm_pcie_dev_tbl[i].dev) {
+			for (index = 0; index < MAX_DEVICE_NUM; index++) {
+				if (dev_table_t[index].bdf == bdf) {
+					msm_pcie_dev_tbl[i].dev = pcidev;
+					msm_pcie_dev_tbl[i].domain = rc_idx;
+					msm_pcie_dev_tbl[i].conf_base =
+						pcie_dev->conf + index * SZ_4K;
+					msm_pcie_dev_tbl[i].phy_address =
+						axi_conf->start + index * SZ_4K;
+
+					dev_table_t[index].dev = pcidev;
+					dev_table_t[index].domain = rc_idx;
+					dev_table_t[index].conf_base =
+						pcie_dev->conf + index * SZ_4K;
+					dev_table_t[index].phy_address =
+						axi_conf->start + index * SZ_4K;
+
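+					/*
+					 * Map this endpoint's 4KB config
+					 * window via the iATU: CFG0 cycles
+					 * for the device on bus 1, CFG1 for
+					 * devices behind a switch.
+					 */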
+					msm_pcie_iatu_config(pcie_dev, index,
+						type,
+						dev_table_t[index].phy_address,
+						dev_table_t[index].phy_address
+						+ SZ_4K - 1,
+						bdf);
+
+					h_type = readl_relaxed(
+						dev_table_t[index].conf_base +
+						PCIE20_HEADER_TYPE);
+
+					bme = readl_relaxed(
+						dev_table_t[index].conf_base +
+						PCIE20_COMMAND_STATUS);
+
+					if (h_type & (1 << 16)) {
+						pci_write_config_dword(pcidev,
+							PCIE20_COMMAND_STATUS,
+							bme | 0x06);
+					} else {
+						pcie_dev->num_ep++;
+						dev_table_t[index].registered =
+							false;
+					}
+
+					if (pcie_dev->num_ep > 1)
+						pcie_dev->pending_ep_reg = true;
+
+					msm_pcie_config_ep_aer(pcie_dev,
+						&dev_table_t[index]);
+
+					break;
+				}
+			}
+			if (index == MAX_DEVICE_NUM) {
+				PCIE_ERR(pcie_dev,
+					"RC%d PCI device table is full.\n",
+					rc_idx);
+				ret = index;
+			} else {
+				break;
+			}
+		} else if (msm_pcie_dev_tbl[i].bdf == bdf &&
+			pcidev == msm_pcie_dev_tbl[i].dev) {
+			break;
+		}
+	}
+	if (i == MAX_RC_NUM * MAX_DEVICE_NUM) {
+		PCIE_ERR(pcie_dev,
+			"Global PCI device table is full: %d elements.\n",
+			i);
+		PCIE_ERR(pcie_dev,
+			"Bus number is 0x%x\nDevice number is 0x%x\n",
+			pcidev->bus->number, pcidev->devfn);
+		ret = i;
+	}
+	return ret;
+}
+
+int msm_pcie_configure_sid(struct device *dev, u32 *sid, int *domain)
+{
+	struct pci_dev *pcidev;
+	struct msm_pcie_dev_t *pcie_dev;
+	struct pci_bus *bus;
+	int i;
+	u32 bdf;
+
+	if (!dev) {
+		pr_err("%s: PCIe: endpoint device passed in is NULL\n",
+			__func__);
+		return MSM_PCIE_ERROR;
+	}
+
+	pcidev = to_pci_dev(dev);
+	if (!pcidev) {
+		pr_err("%s: PCIe: PCI device of endpoint is NULL\n",
+			__func__);
+		return MSM_PCIE_ERROR;
+	}
+
+	bus = pcidev->bus;
+	if (!bus) {
+		pr_err("%s: PCIe: Bus of PCI device is NULL\n",
+			__func__);
+		return MSM_PCIE_ERROR;
+	}
+
+	while (!pci_is_root_bus(bus))
+		bus = bus->parent;
+
+	pcie_dev = (struct msm_pcie_dev_t *)(bus->sysdata);
+	if (!pcie_dev) {
+		pr_err("%s: PCIe: Could not get PCIe structure\n",
+			__func__);
+		return MSM_PCIE_ERROR;
+	}
+
+	if (!pcie_dev->smmu_exist) {
+		PCIE_DBG(pcie_dev,
+			"PCIe: RC:%d: smmu does not exist\n",
+			pcie_dev->rc_idx);
+		return MSM_PCIE_ERROR;
+	}
+
+	PCIE_DBG(pcie_dev, "PCIe: RC%d: device address is: %p\n",
+		pcie_dev->rc_idx, dev);
+	PCIE_DBG(pcie_dev, "PCIe: RC%d: PCI device address is: %p\n",
+		pcie_dev->rc_idx, pcidev);
+
+	*domain = pcie_dev->rc_idx;
+
+	if (pcie_dev->current_short_bdf < (MAX_SHORT_BDF_NUM - 1)) {
+		pcie_dev->current_short_bdf++;
+	} else {
+		PCIE_ERR(pcie_dev,
+			"PCIe: RC%d: No more short BDF left\n",
+			pcie_dev->rc_idx);
+		return MSM_PCIE_ERROR;
+	}
+
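+	/*
+	 * Assign the endpoint a "short BDF" index, program it into
+	 * PCIE20_PARF_BDF_TRANSLATE_N, and fold it into the SMMU SID as
+	 * sid_base + ((rc_idx << 4) | short_bdf).
+	 */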
+	bdf = BDF_OFFSET(pcidev->bus->number, pcidev->devfn);
+
+	for (i = 0; i < MAX_DEVICE_NUM; i++) {
+		if (pcie_dev->pcidev_table[i].bdf == bdf) {
+			*sid = pcie_dev->smmu_sid_base +
+				((pcie_dev->rc_idx << 4) |
+				pcie_dev->current_short_bdf);
+
+			msm_pcie_write_reg(pcie_dev->parf,
+				PCIE20_PARF_BDF_TRANSLATE_N +
+				pcie_dev->current_short_bdf * 4,
+				bdf >> 16);
+
+			pcie_dev->pcidev_table[i].sid = *sid;
+			pcie_dev->pcidev_table[i].short_bdf =
+				pcie_dev->current_short_bdf;
+			break;
+		}
+	}
+
+	if (i == MAX_DEVICE_NUM) {
+		pcie_dev->current_short_bdf--;
+		PCIE_ERR(pcie_dev,
+			"PCIe: RC%d could not find BDF:0x%x\n",
+			pcie_dev->rc_idx, bdf);
+		return MSM_PCIE_ERROR;
+	}
+
+	PCIE_DBG(pcie_dev,
+		"PCIe: RC%d: Device: %02x:%02x.%01x received SID %d\n",
+		pcie_dev->rc_idx,
+		bdf >> 24,
+		bdf >> 19 & 0x1f,
+		bdf >> 16 & 0x07,
+		*sid);
+
+	return 0;
+}
+EXPORT_SYMBOL(msm_pcie_configure_sid);
+
+int msm_pcie_enumerate(u32 rc_idx)
+{
+	int ret = 0, bus_ret = 0, scan_ret = 0;
+	struct msm_pcie_dev_t *dev = &msm_pcie_dev[rc_idx];
+
+	mutex_lock(&dev->enumerate_lock);
+
+	PCIE_DBG(dev, "Enumerate RC%d\n", rc_idx);
+
+	if (!dev->drv_ready) {
+		PCIE_DBG(dev, "RC%d has not been successfully probed yet\n",
+			rc_idx);
+		ret = -EPROBE_DEFER;
+		goto out;
+	}
+
+	if (!dev->enumerated) {
+		ret = msm_pcie_enable(dev, PM_ALL);
+
+		/* kick start ARM PCI configuration framework */
+		if (!ret) {
+			struct pci_dev *pcidev = NULL;
+			bool found = false;
+			struct pci_bus *bus;
+			resource_size_t iobase = 0;
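+			/*
+			 * Offset 0 of RC config space: vendor ID in
+			 * bits[15:0], device ID in bits[31:16].
+			 */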
+			u32 ids = readl_relaxed(msm_pcie_dev[rc_idx].dm_core);
+			u32 vendor_id = ids & 0xffff;
+			u32 device_id = (ids & 0xffff0000) >> 16;
+			LIST_HEAD(res);
+
+			PCIE_DBG(dev, "vendor-id:0x%x device_id:0x%x\n",
+					vendor_id, device_id);
+
+			ret = of_pci_get_host_bridge_resources(
+						dev->pdev->dev.of_node,
+						0, 0xff, &res, &iobase);
+			if (ret) {
+				PCIE_ERR(dev,
+					"PCIe: failed to get host bridge resources for RC%d: %d\n",
+					dev->rc_idx, ret);
+				goto out;
+			}
+
+			bus = pci_create_root_bus(&dev->pdev->dev, 0,
+						&msm_pcie_ops,
+						msm_pcie_setup_sys_data(dev),
+						&res);
+			if (!bus) {
+				PCIE_ERR(dev,
+					"PCIe: failed to create root bus for RC%d\n",
+					dev->rc_idx);
+				ret = -ENOMEM;
+				goto out;
+			}
+
+			scan_ret = pci_scan_child_bus(bus);
+			PCIE_DBG(dev,
+				"PCIe: RC%d: The max subordinate bus number discovered is %d\n",
+				dev->rc_idx, scan_ret);
+
+			msm_pcie_fixup_irqs(dev);
+			pci_assign_unassigned_bus_resources(bus);
+			pci_bus_add_devices(bus);
+
+			dev->enumerated = true;
+
+			msm_pcie_write_mask(dev->dm_core +
+				PCIE20_COMMAND_STATUS, 0, BIT(2)|BIT(1));
+
+			if (dev->cpl_timeout && dev->bridge_found)
+				msm_pcie_write_reg_field(dev->dm_core,
+					PCIE20_DEVICE_CONTROL2_STATUS2,
+					0xf, dev->cpl_timeout);
+
+			if (dev->shadow_en) {
+				u32 val = readl_relaxed(dev->dm_core +
+						PCIE20_COMMAND_STATUS);
+				PCIE_DBG(dev, "PCIE20_COMMAND_STATUS:0x%x\n",
+					val);
+				dev->rc_shadow[PCIE20_COMMAND_STATUS / 4] = val;
+			}
+
+			do {
+				pcidev = pci_get_device(vendor_id,
+					device_id, pcidev);
+				if (pcidev && (&msm_pcie_dev[rc_idx] ==
+					(struct msm_pcie_dev_t *)
+					PCIE_BUS_PRIV_DATA(pcidev->bus))) {
+					msm_pcie_dev[rc_idx].dev = pcidev;
+					found = true;
+					PCIE_DBG(&msm_pcie_dev[rc_idx],
+						"PCI device is found for RC%d\n",
+						rc_idx);
+				}
+			} while (!found && pcidev);
+
+			if (!pcidev) {
+				PCIE_ERR(dev,
+					"PCIe: Did not find PCI device for RC%d.\n",
+					dev->rc_idx);
+				ret = -ENODEV;
+				goto out;
+			}
+
+			bus_ret = bus_for_each_dev(&pci_bus_type, NULL, dev,
+					&msm_pcie_config_device_table);
+
+			if (bus_ret) {
+				PCIE_ERR(dev,
+					"PCIe: Failed to set up device table for RC%d\n",
+					dev->rc_idx);
+				ret = -ENODEV;
+				goto out;
+			}
+		} else {
+			PCIE_ERR(dev, "PCIe: failed to enable RC%d.\n",
+				dev->rc_idx);
+		}
+	} else {
+		PCIE_ERR(dev, "PCIe: RC%d has already been enumerated.\n",
+			dev->rc_idx);
+	}
+
+out:
+	mutex_unlock(&dev->enumerate_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL(msm_pcie_enumerate);
+
+static void msm_pcie_notify_client(struct msm_pcie_dev_t *dev,
+					enum msm_pcie_event event)
+{
+	if (dev->event_reg && dev->event_reg->callback &&
+		(dev->event_reg->events & event)) {
+		struct msm_pcie_notify *notify = &dev->event_reg->notify;
+
+		notify->event = event;
+		notify->user = dev->event_reg->user;
+		PCIE_DBG(dev, "PCIe: callback RC%d for event %d\n",
+			dev->rc_idx, event);
+		dev->event_reg->callback(notify);
+
+		if ((dev->event_reg->options & MSM_PCIE_CONFIG_NO_RECOVERY) &&
+				(event == MSM_PCIE_EVENT_LINKDOWN)) {
+			dev->user_suspend = true;
+			PCIE_DBG(dev,
+				"PCIe: Client of RC%d will recover the link later.\n",
+				dev->rc_idx);
+			return;
+		}
+	} else {
+		PCIE_DBG2(dev,
+			"PCIe: Client of RC%d does not have registration for event %d\n",
+			dev->rc_idx, event);
+	}
+}
+
+static void handle_wake_func(struct work_struct *work)
+{
+	int i, ret;
+	struct msm_pcie_dev_t *dev = container_of(work, struct msm_pcie_dev_t,
+					handle_wake_work);
+
+	PCIE_DBG(dev, "PCIe: Wake work for RC%d\n", dev->rc_idx);
+
+	mutex_lock(&dev->recovery_lock);
+
+	if (!dev->enumerated) {
+		PCIE_DBG(dev,
+			"PCIe: Start enumeration for RC%d upon the wake from endpoint.\n",
+			dev->rc_idx);
+
+		ret = msm_pcie_enumerate(dev->rc_idx);
+		if (ret) {
+			PCIE_ERR(dev,
+				"PCIe: failed to enable RC%d upon wake request from the device.\n",
+				dev->rc_idx);
+			goto out;
+		}
+
+		if (dev->num_ep > 1) {
+			for (i = 0; i < MAX_DEVICE_NUM; i++) {
+				dev->event_reg = dev->pcidev_table[i].event_reg;
+
+				if ((dev->link_status == MSM_PCIE_LINK_ENABLED)
+					&& dev->event_reg &&
+					dev->event_reg->callback &&
+					(dev->event_reg->events &
+					MSM_PCIE_EVENT_LINKUP)) {
+					struct msm_pcie_notify *notify =
+						&dev->event_reg->notify;
+					notify->event = MSM_PCIE_EVENT_LINKUP;
+					notify->user = dev->event_reg->user;
+					PCIE_DBG(dev,
+						"PCIe: Linkup callback for RC%d after enumeration is successful in wake IRQ handling\n",
+						dev->rc_idx);
+					dev->event_reg->callback(notify);
+				}
+			}
+		} else {
+			if ((dev->link_status == MSM_PCIE_LINK_ENABLED) &&
+				dev->event_reg && dev->event_reg->callback &&
+				(dev->event_reg->events &
+				MSM_PCIE_EVENT_LINKUP)) {
+				struct msm_pcie_notify *notify =
+						&dev->event_reg->notify;
+				notify->event = MSM_PCIE_EVENT_LINKUP;
+				notify->user = dev->event_reg->user;
+				PCIE_DBG(dev,
+					"PCIe: Linkup callback for RC%d after enumeration is successful in wake IRQ handling\n",
+					dev->rc_idx);
+				dev->event_reg->callback(notify);
+			} else {
+				PCIE_DBG(dev,
+					"PCIe: Client of RC%d does not have registration for linkup event.\n",
+					dev->rc_idx);
+			}
+		}
+	} else {
+		PCIE_ERR(dev,
+			"PCIe: The enumeration for RC%d has already been done.\n",
+			dev->rc_idx);
+	}
+
+out:
+	mutex_unlock(&dev->recovery_lock);
+}
+
+static irqreturn_t handle_aer_irq(int irq, void *data)
+{
+	struct msm_pcie_dev_t *dev = data;
+
+	int corr_val = 0, uncorr_val = 0, rc_err_status = 0;
+	int ep_corr_val = 0, ep_uncorr_val = 0;
+	int rc_dev_ctrlstts = 0, ep_dev_ctrlstts = 0;
+	u32 ep_dev_ctrlstts_offset = 0;
+	int i, j, ep_src_bdf = 0;
+	void __iomem *ep_base = NULL;
+	unsigned long irqsave_flags;
+
+	PCIE_DBG2(dev,
+		"AER Interrupt handler fired for RC%d irq %d\nrc_corr_counter: %lu\nrc_non_fatal_counter: %lu\nrc_fatal_counter: %lu\nep_corr_counter: %lu\nep_non_fatal_counter: %lu\nep_fatal_counter: %lu\n",
+		dev->rc_idx, irq, dev->rc_corr_counter,
+		dev->rc_non_fatal_counter, dev->rc_fatal_counter,
+		dev->ep_corr_counter, dev->ep_non_fatal_counter,
+		dev->ep_fatal_counter);
+
+	spin_lock_irqsave(&dev->aer_lock, irqsave_flags);
+
+	if (dev->suspending) {
+		PCIE_DBG2(dev,
+			"PCIe: RC%d is currently suspending.\n",
+			dev->rc_idx);
+		spin_unlock_irqrestore(&dev->aer_lock, irqsave_flags);
+		return IRQ_HANDLED;
+	}
+
+	uncorr_val = readl_relaxed(dev->dm_core +
+				PCIE20_AER_UNCORR_ERR_STATUS_REG);
+	corr_val = readl_relaxed(dev->dm_core +
+				PCIE20_AER_CORR_ERR_STATUS_REG);
+	rc_err_status = readl_relaxed(dev->dm_core +
+				PCIE20_AER_ROOT_ERR_STATUS_REG);
+	rc_dev_ctrlstts = readl_relaxed(dev->dm_core +
+				PCIE20_CAP_DEVCTRLSTATUS);
+
+	if (uncorr_val)
+		PCIE_DBG(dev, "RC's PCIE20_AER_UNCORR_ERR_STATUS_REG:0x%x\n",
+				uncorr_val);
+	if (corr_val && (dev->rc_corr_counter < corr_counter_limit))
+		PCIE_DBG(dev, "RC's PCIE20_AER_CORR_ERR_STATUS_REG:0x%x\n",
+				corr_val);
+
+	if ((rc_dev_ctrlstts >> 18) & 0x1)
+		dev->rc_fatal_counter++;
+	if ((rc_dev_ctrlstts >> 17) & 0x1)
+		dev->rc_non_fatal_counter++;
+	if ((rc_dev_ctrlstts >> 16) & 0x1)
+		dev->rc_corr_counter++;
+
+	msm_pcie_write_mask(dev->dm_core + PCIE20_CAP_DEVCTRLSTATUS, 0,
+				BIT(18)|BIT(17)|BIT(16));
+
+	if (dev->link_status == MSM_PCIE_LINK_DISABLED) {
+		PCIE_DBG2(dev, "RC%d link is down\n", dev->rc_idx);
+		goto out;
+	}
+
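+	/*
+	 * PCIE20_AER_ERR_SRC_ID_REG reports the correctable error source ID
+	 * in bits[15:0] and the uncorrectable one in bits[31:16]; check both.
+	 */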
+	for (i = 0; i < 2; i++) {
+		if (i)
+			ep_src_bdf = readl_relaxed(dev->dm_core +
+				PCIE20_AER_ERR_SRC_ID_REG) & ~0xffff;
+		else
+			ep_src_bdf = (readl_relaxed(dev->dm_core +
+				PCIE20_AER_ERR_SRC_ID_REG) & 0xffff) << 16;
+
+		if (!ep_src_bdf)
+			continue;
+
+		for (j = 0; j < MAX_DEVICE_NUM; j++) {
+			if (ep_src_bdf == dev->pcidev_table[j].bdf) {
+				PCIE_DBG2(dev,
+					"PCIe: %s Error from Endpoint: %02x:%02x.%01x\n",
+					i ? "Uncorrectable" : "Correctable",
+					dev->pcidev_table[j].bdf >> 24,
+					dev->pcidev_table[j].bdf >> 19 & 0x1f,
+					dev->pcidev_table[j].bdf >> 16 & 0x07);
+				ep_base = dev->pcidev_table[j].conf_base;
+				ep_dev_ctrlstts_offset = dev->
+					pcidev_table[j].dev_ctrlstts_offset;
+				break;
+			}
+		}
+
+		if (!ep_base) {
+			PCIE_ERR(dev,
+				"PCIe: RC%d no endpoint found for reported error\n",
+				dev->rc_idx);
+			goto out;
+		}
+
+		ep_uncorr_val = readl_relaxed(ep_base +
+					PCIE20_AER_UNCORR_ERR_STATUS_REG);
+		ep_corr_val = readl_relaxed(ep_base +
+					PCIE20_AER_CORR_ERR_STATUS_REG);
+		ep_dev_ctrlstts = readl_relaxed(ep_base +
+					ep_dev_ctrlstts_offset);
+
+		if (ep_uncorr_val)
+			PCIE_DBG(dev,
+				"EP's PCIE20_AER_UNCORR_ERR_STATUS_REG:0x%x\n",
+				ep_uncorr_val);
+		if (ep_corr_val && (dev->ep_corr_counter < corr_counter_limit))
+			PCIE_DBG(dev,
+				"EP's PCIE20_AER_CORR_ERR_STATUS_REG:0x%x\n",
+				ep_corr_val);
+
+		if ((ep_dev_ctrlstts >> 18) & 0x1)
+			dev->ep_fatal_counter++;
+		if ((ep_dev_ctrlstts >> 17) & 0x1)
+			dev->ep_non_fatal_counter++;
+		if ((ep_dev_ctrlstts >> 16) & 0x1)
+			dev->ep_corr_counter++;
+
+		msm_pcie_write_mask(ep_base + ep_dev_ctrlstts_offset, 0,
+					BIT(18)|BIT(17)|BIT(16));
+
+		msm_pcie_write_reg_field(ep_base,
+				PCIE20_AER_UNCORR_ERR_STATUS_REG,
+				0x3fff031, 0x3fff031);
+		msm_pcie_write_reg_field(ep_base,
+				PCIE20_AER_CORR_ERR_STATUS_REG,
+				0xf1c1, 0xf1c1);
+	}
+out:
+	if (((dev->rc_corr_counter < corr_counter_limit) &&
+		(dev->ep_corr_counter < corr_counter_limit)) ||
+		uncorr_val || ep_uncorr_val)
+		PCIE_DBG(dev, "RC's PCIE20_AER_ROOT_ERR_STATUS_REG:0x%x\n",
+				rc_err_status);
+	msm_pcie_write_reg_field(dev->dm_core,
+			PCIE20_AER_UNCORR_ERR_STATUS_REG,
+			0x3fff031, 0x3fff031);
+	msm_pcie_write_reg_field(dev->dm_core,
+			PCIE20_AER_CORR_ERR_STATUS_REG,
+			0xf1c1, 0xf1c1);
+	msm_pcie_write_reg_field(dev->dm_core,
+			PCIE20_AER_ROOT_ERR_STATUS_REG,
+			0x7f, 0x7f);
+
+	spin_unlock_irqrestore(&dev->aer_lock, irqsave_flags);
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t handle_wake_irq(int irq, void *data)
+{
+	struct msm_pcie_dev_t *dev = data;
+	unsigned long irqsave_flags;
+	int i;
+
+	spin_lock_irqsave(&dev->wakeup_lock, irqsave_flags);
+
+	dev->wake_counter++;
+	PCIE_DBG(dev, "PCIe: No. %ld wake IRQ for RC%d\n",
+			dev->wake_counter, dev->rc_idx);
+
+	PCIE_DBG2(dev, "PCIe WAKE is asserted by Endpoint of RC%d\n",
+		dev->rc_idx);
+
+	if (!dev->enumerated) {
+		PCIE_DBG(dev, "Start enumerating RC%d\n", dev->rc_idx);
+		if (dev->ep_wakeirq)
+			schedule_work(&dev->handle_wake_work);
+		else
+			PCIE_DBG(dev,
+				"wake irq is received but ep_wakeirq is not supported for RC%d.\n",
+				dev->rc_idx);
+	} else {
+		PCIE_DBG2(dev, "Wake up RC%d\n", dev->rc_idx);
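+		/*
+		 * Record a wakeup event: stay_awake immediately followed by
+		 * relax notifies the PM core without holding the system awake.
+		 */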
+		__pm_stay_awake(&dev->ws);
+		__pm_relax(&dev->ws);
+
+		if (dev->num_ep > 1) {
+			for (i = 0; i < MAX_DEVICE_NUM; i++) {
+				dev->event_reg =
+					dev->pcidev_table[i].event_reg;
+				msm_pcie_notify_client(dev,
+					MSM_PCIE_EVENT_WAKEUP);
+			}
+		} else {
+			msm_pcie_notify_client(dev, MSM_PCIE_EVENT_WAKEUP);
+		}
+	}
+
+	spin_unlock_irqrestore(&dev->wakeup_lock, irqsave_flags);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t handle_linkdown_irq(int irq, void *data)
+{
+	struct msm_pcie_dev_t *dev = data;
+	unsigned long irqsave_flags;
+	int i;
+
+	spin_lock_irqsave(&dev->linkdown_lock, irqsave_flags);
+
+	dev->linkdown_counter++;
+
+	PCIE_DBG(dev,
+		"PCIe: No. %ld linkdown IRQ for RC%d.\n",
+		dev->linkdown_counter, dev->rc_idx);
+
+	if (!dev->enumerated || dev->link_status != MSM_PCIE_LINK_ENABLED) {
+		PCIE_DBG(dev,
+			"PCIe: Linkdown IRQ for RC%d when the link is not enabled\n",
+			dev->rc_idx);
+	} else if (dev->suspending) {
+		PCIE_DBG(dev,
+			"PCIe: the link of RC%d is suspending.\n",
+			dev->rc_idx);
+	} else {
+		dev->link_status = MSM_PCIE_LINK_DISABLED;
+		dev->shadow_en = false;
+
+		if (dev->linkdown_panic)
+			panic("User has chosen to panic on linkdown\n");
+
+		/* assert PERST */
+		gpio_set_value(dev->gpio[MSM_PCIE_GPIO_PERST].num,
+				dev->gpio[MSM_PCIE_GPIO_PERST].on);
+		PCIE_ERR(dev, "PCIe link is down for RC%d\n", dev->rc_idx);
+
+		if (dev->num_ep > 1) {
+			for (i = 0; i < MAX_DEVICE_NUM; i++) {
+				dev->event_reg =
+					dev->pcidev_table[i].event_reg;
+				msm_pcie_notify_client(dev,
+					MSM_PCIE_EVENT_LINKDOWN);
+			}
+		} else {
+			msm_pcie_notify_client(dev, MSM_PCIE_EVENT_LINKDOWN);
+		}
+	}
+
+	spin_unlock_irqrestore(&dev->linkdown_lock, irqsave_flags);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t handle_msi_irq(int irq, void *data)
+{
+	int i, j;
+	unsigned long val;
+	struct msm_pcie_dev_t *dev = data;
+	void __iomem *ctrl_status;
+
+	PCIE_DUMP(dev, "irq: %d\n", irq);
+
+	/*
+	 * check for set bits, clear it by setting that bit
+	 * and trigger corresponding irq
+	 */
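+	/*
+	 * Each MSI controller bank has its own status register, hence the
+	 * 12-byte stride between banks (per the DesignWare register layout).
+	 */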
+	for (i = 0; i < PCIE20_MSI_CTRL_MAX; i++) {
+		ctrl_status = dev->dm_core +
+				PCIE20_MSI_CTRL_INTR_STATUS + (i * 12);
+
+		val = readl_relaxed(ctrl_status);
+		while (val) {
+			j = find_first_bit(&val, 32);
+			writel_relaxed(BIT(j), ctrl_status);
+			/* ensure that interrupt is cleared (acked) */
+			wmb();
+			generic_handle_irq(
+			   irq_find_mapping(dev->irq_domain, (j + (32*i)))
+			   );
+			val = readl_relaxed(ctrl_status);
+		}
+	}
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t handle_global_irq(int irq, void *data)
+{
+	int i;
+	struct msm_pcie_dev_t *dev = data;
+	unsigned long irqsave_flags;
+	u32 status = 0;
+
+	spin_lock_irqsave(&dev->global_irq_lock, irqsave_flags);
+
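+	/* service only interrupts that are both pending and unmasked */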
+	status = readl_relaxed(dev->parf + PCIE20_PARF_INT_ALL_STATUS) &
+			readl_relaxed(dev->parf + PCIE20_PARF_INT_ALL_MASK);
+
+	msm_pcie_write_mask(dev->parf + PCIE20_PARF_INT_ALL_CLEAR, 0, status);
+
+	PCIE_DBG2(dev, "RC%d: Global IRQ %d received: 0x%x\n",
+		dev->rc_idx, irq, status);
+
+	for (i = 0; i <= MSM_PCIE_INT_EVT_MAX; i++) {
+		if (status & BIT(i)) {
+			switch (i) {
+			case MSM_PCIE_INT_EVT_LINK_DOWN:
+				PCIE_DBG(dev,
+					"PCIe: RC%d: handle linkdown event.\n",
+					dev->rc_idx);
+				handle_linkdown_irq(irq, data);
+				break;
+			case MSM_PCIE_INT_EVT_AER_LEGACY:
+				PCIE_DBG(dev,
+					"PCIe: RC%d: AER legacy event.\n",
+					dev->rc_idx);
+				handle_aer_irq(irq, data);
+				break;
+			case MSM_PCIE_INT_EVT_AER_ERR:
+				PCIE_DBG(dev,
+					"PCIe: RC%d: AER event.\n",
+					dev->rc_idx);
+				handle_aer_irq(irq, data);
+				break;
+			default:
+				PCIE_ERR(dev,
+					"PCIe: RC%d: caught unexpected event %d!\n",
+					dev->rc_idx, i);
+			}
+		}
+	}
+
+	spin_unlock_irqrestore(&dev->global_irq_lock, irqsave_flags);
+
+	return IRQ_HANDLED;
+}
+
+void msm_pcie_destroy_irq(unsigned int irq, struct msm_pcie_dev_t *pcie_dev)
+{
+	int pos, i;
+	struct msm_pcie_dev_t *dev;
+
+	if (pcie_dev)
+		dev = pcie_dev;
+	else
+		dev = irq_get_chip_data(irq);
+
+	if (!dev) {
+		pr_err("PCIe: device is null. IRQ:%d\n", irq);
+		return;
+	}
+
+	if (dev->msi_gicm_addr) {
+		PCIE_DBG(dev, "destroy QGIC based irq %d\n", irq);
+
+		for (i = 0; i < MSM_PCIE_MAX_MSI; i++)
+			if (irq == dev->msi[i].num)
+				break;
+		if (i == MSM_PCIE_MAX_MSI) {
+			PCIE_ERR(dev,
+				"Could not find irq: %d in RC%d MSI table\n",
+				irq, dev->rc_idx);
+			return;
+		}
+
+		pos = i;
+	} else {
+		PCIE_DBG(dev, "destroy default MSI irq %d\n", irq);
+		pos = irq - irq_find_mapping(dev->irq_domain, 0);
+	}
+
+	PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
+
+	PCIE_DBG(dev, "Before clear_bit pos:%d msi_irq_in_use:%ld\n",
+		pos, *dev->msi_irq_in_use);
+	clear_bit(pos, dev->msi_irq_in_use);
+	PCIE_DBG(dev, "After clear_bit pos:%d msi_irq_in_use:%ld\n",
+		pos, *dev->msi_irq_in_use);
+}
+
+/* hookup to linux pci msi framework */
+void arch_teardown_msi_irq(unsigned int irq)
+{
+	PCIE_GEN_DBG("irq %d deallocated\n", irq);
+	msm_pcie_destroy_irq(irq, NULL);
+}
+
+void arch_teardown_msi_irqs(struct pci_dev *dev)
+{
+	struct msi_desc *entry;
+	struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
+
+	PCIE_DBG(pcie_dev, "RC:%d EP: vendor_id:0x%x device_id:0x%x\n",
+		pcie_dev->rc_idx, dev->vendor, dev->device);
+
+	pcie_dev->use_msi = false;
+
+	list_for_each_entry(entry, &dev->dev.msi_list, list) {
+		int i, nvec;
+
+		if (entry->irq == 0)
+			continue;
+		nvec = 1 << entry->msi_attrib.multiple;
+		for (i = 0; i < nvec; i++)
+			msm_pcie_destroy_irq(entry->irq + i, pcie_dev);
+	}
+}
+
+static void msm_pcie_msi_nop(struct irq_data *d)
+{
+}
+
+static struct irq_chip pcie_msi_chip = {
+	.name = "msm-pcie-msi",
+	.irq_ack = msm_pcie_msi_nop,
+	.irq_enable = unmask_msi_irq,
+	.irq_disable = mask_msi_irq,
+	.irq_mask = mask_msi_irq,
+	.irq_unmask = unmask_msi_irq,
+};
+
+static int msm_pcie_create_irq(struct msm_pcie_dev_t *dev)
+{
+	int irq, pos;
+
+	PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
+
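+	/*
+	 * Find a free MSI slot in the bitmap; test_and_set_bit() is atomic,
+	 * so retry from "again" if another context claims the slot first.
+	 */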
+again:
+	pos = find_first_zero_bit(dev->msi_irq_in_use, PCIE_MSI_NR_IRQS);
+
+	if (pos >= PCIE_MSI_NR_IRQS)
+		return -ENOSPC;
+
+	PCIE_DBG(dev, "pos:%d msi_irq_in_use:%ld\n", pos, *dev->msi_irq_in_use);
+
+	if (test_and_set_bit(pos, dev->msi_irq_in_use))
+		goto again;
+	else
+		PCIE_DBG(dev, "test_and_set_bit is successful pos=%d\n", pos);
+
+	irq = irq_create_mapping(dev->irq_domain, pos);
+	if (!irq)
+		return -EINVAL;
+
+	return irq;
+}
+
+static int arch_setup_msi_irq_default(struct pci_dev *pdev,
+		struct msi_desc *desc, int nvec)
+{
+	int irq;
+	struct msi_msg msg;
+	struct msm_pcie_dev_t *dev = PCIE_BUS_PRIV_DATA(pdev->bus);
+
+	PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
+
+	irq = msm_pcie_create_irq(dev);
+
+	if (irq < 0)
+		return irq;
+
+	PCIE_DBG(dev, "IRQ %d is allocated.\n", irq);
+
+	irq_set_msi_desc(irq, desc);
+
+	/* write msi vector and data */
+	msg.address_hi = 0;
+	msg.address_lo = MSM_PCIE_MSI_PHY;
+	msg.data = irq - irq_find_mapping(dev->irq_domain, 0);
+	write_msi_msg(irq, &msg);
+
+	return 0;
+}
+
+static int msm_pcie_create_irq_qgic(struct msm_pcie_dev_t *dev)
+{
+	int irq, pos;
+
+	PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
+
+again:
+	pos = find_first_zero_bit(dev->msi_irq_in_use, PCIE_MSI_NR_IRQS);
+
+	if (pos >= PCIE_MSI_NR_IRQS)
+		return -ENOSPC;
+
+	PCIE_DBG(dev, "pos:%d msi_irq_in_use:%ld\n", pos, *dev->msi_irq_in_use);
+
+	if (test_and_set_bit(pos, dev->msi_irq_in_use))
+		goto again;
+	else
+		PCIE_DBG(dev, "test_and_set_bit is successful pos=%d\n", pos);
+
+	if (pos >= MSM_PCIE_MAX_MSI) {
+		/* release the slot claimed above before bailing out */
+		clear_bit(pos, dev->msi_irq_in_use);
+		PCIE_ERR(dev,
+			"PCIe: RC%d: pos %d is not less than %d\n",
+			dev->rc_idx, pos, MSM_PCIE_MAX_MSI);
+		return MSM_PCIE_ERROR;
+	}
+
+	irq = dev->msi[pos].num;
+	if (!irq) {
+		PCIE_ERR(dev, "PCIe: RC%d failed to create QGIC MSI IRQ.\n",
+			dev->rc_idx);
+		return -EINVAL;
+	}
+
+	return irq;
+}
+
+static int arch_setup_msi_irq_qgic(struct pci_dev *pdev,
+		struct msi_desc *desc, int nvec)
+{
+	int irq, index, firstirq = 0;
+	struct msi_msg msg;
+	struct msm_pcie_dev_t *dev = PCIE_BUS_PRIV_DATA(pdev->bus);
+
+	PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
+
+	for (index = 0; index < nvec; index++) {
+		irq = msm_pcie_create_irq_qgic(dev);
+		PCIE_DBG(dev, "irq %d is allocated\n", irq);
+
+		if (irq < 0)
+			return irq;
+
+		if (index == 0)
+			firstirq = irq;
+
+		irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING);
+	}
+
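+	/*
+	 * Route the MSI to the QGIC message register: the address is
+	 * msi_gicm_addr and the data selects the SPI, offset from
+	 * msi_gicm_base by the vector's position in the MSI table.
+	 */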
+	/* write msi vector and data */
+	irq_set_msi_desc(firstirq, desc);
+	msg.address_hi = 0;
+	msg.address_lo = dev->msi_gicm_addr;
+	msg.data = dev->msi_gicm_base + (firstirq - dev->msi[0].num);
+	write_msi_msg(firstirq, &msg);
+
+	return 0;
+}
+
+int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
+{
+	struct msm_pcie_dev_t *dev = PCIE_BUS_PRIV_DATA(pdev->bus);
+
+	PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
+
+	if (dev->msi_gicm_addr)
+		return arch_setup_msi_irq_qgic(pdev, desc, 1);
+	else
+		return arch_setup_msi_irq_default(pdev, desc, 1);
+}
+
+static int msm_pcie_get_msi_multiple(int nvec)
+{
+	int msi_multiple = 0;
+
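+	/* return floor(log2(nvec)), the encoding msi_attrib.multiple expects */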
+	while (nvec) {
+		nvec = nvec >> 1;
+		msi_multiple++;
+	}
+	PCIE_GEN_DBG("log2 number of MSI multiple:%d\n",
+		msi_multiple - 1);
+
+	return msi_multiple - 1;
+}
+
+int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
+{
+	struct msi_desc *entry;
+	int ret;
+	struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
+
+	PCIE_DBG(pcie_dev, "RC%d\n", pcie_dev->rc_idx);
+
+	if (type != PCI_CAP_ID_MSI || nvec > 32)
+		return -ENOSPC;
+
+	PCIE_DBG(pcie_dev, "nvec = %d\n", nvec);
+
+	list_for_each_entry(entry, &dev->dev.msi_list, list) {
+		entry->msi_attrib.multiple =
+				msm_pcie_get_msi_multiple(nvec);
+
+		if (pcie_dev->msi_gicm_addr)
+			ret = arch_setup_msi_irq_qgic(dev, entry, nvec);
+		else
+			ret = arch_setup_msi_irq_default(dev, entry, nvec);
+
+		PCIE_DBG(pcie_dev, "ret from msi_irq: %d\n", ret);
+
+		if (ret < 0)
+			return ret;
+		if (ret > 0)
+			return -ENOSPC;
+	}
+
+	pcie_dev->use_msi = true;
+
+	return 0;
+}
+
+static int msm_pcie_msi_map(struct irq_domain *domain, unsigned int irq,
+	   irq_hw_number_t hwirq)
+{
+	irq_set_chip_and_handler(irq, &pcie_msi_chip, handle_simple_irq);
+	irq_set_chip_data(irq, domain->host_data);
+	return 0;
+}
+
+static const struct irq_domain_ops msm_pcie_msi_ops = {
+	.map = msm_pcie_msi_map,
+};
+
+int32_t msm_pcie_irq_init(struct msm_pcie_dev_t *dev)
+{
+	int rc;
+	struct device *pdev = &dev->pdev->dev;
+
+	PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
+
+	if (dev->rc_idx)
+		wakeup_source_init(&dev->ws, "RC1 pcie_wakeup_source");
+	else
+		wakeup_source_init(&dev->ws, "RC0 pcie_wakeup_source");
+
+	/* register handler for linkdown interrupt */
+	if (dev->irq[MSM_PCIE_INT_LINK_DOWN].num) {
+		rc = devm_request_irq(pdev,
+			dev->irq[MSM_PCIE_INT_LINK_DOWN].num,
+			handle_linkdown_irq,
+			IRQF_TRIGGER_RISING,
+			dev->irq[MSM_PCIE_INT_LINK_DOWN].name,
+			dev);
+		if (rc) {
+			PCIE_ERR(dev,
+				"PCIe: Unable to request linkdown interrupt:%d\n",
+				dev->irq[MSM_PCIE_INT_LINK_DOWN].num);
+			return rc;
+		}
+	}
+
+	/* register handler for physical MSI interrupt line */
+	if (dev->irq[MSM_PCIE_INT_MSI].num) {
+		rc = devm_request_irq(pdev,
+			dev->irq[MSM_PCIE_INT_MSI].num,
+			handle_msi_irq,
+			IRQF_TRIGGER_RISING,
+			dev->irq[MSM_PCIE_INT_MSI].name,
+			dev);
+		if (rc) {
+			PCIE_ERR(dev,
+				"PCIe: RC%d: Unable to request MSI interrupt\n",
+				dev->rc_idx);
+			return rc;
+		}
+	}
+
+	/* register handler for AER interrupt */
+	if (dev->irq[MSM_PCIE_INT_PLS_ERR].num) {
+		rc = devm_request_irq(pdev,
+				dev->irq[MSM_PCIE_INT_PLS_ERR].num,
+				handle_aer_irq,
+				IRQF_TRIGGER_RISING,
+				dev->irq[MSM_PCIE_INT_PLS_ERR].name,
+				dev);
+		if (rc) {
+			PCIE_ERR(dev,
+				"PCIe: RC%d: Unable to request aer pls_err interrupt: %d\n",
+				dev->rc_idx,
+				dev->irq[MSM_PCIE_INT_PLS_ERR].num);
+			return rc;
+		}
+	}
+
+	/* register handler for AER legacy interrupt */
+	if (dev->irq[MSM_PCIE_INT_AER_LEGACY].num) {
+		rc = devm_request_irq(pdev,
+				dev->irq[MSM_PCIE_INT_AER_LEGACY].num,
+				handle_aer_irq,
+				IRQF_TRIGGER_RISING,
+				dev->irq[MSM_PCIE_INT_AER_LEGACY].name,
+				dev);
+		if (rc) {
+			PCIE_ERR(dev,
+				"PCIe: RC%d: Unable to request aer aer_legacy interrupt: %d\n",
+				dev->rc_idx,
+				dev->irq[MSM_PCIE_INT_AER_LEGACY].num);
+			return rc;
+		}
+	}
+
+	if (dev->irq[MSM_PCIE_INT_GLOBAL_INT].num) {
+		rc = devm_request_irq(pdev,
+				dev->irq[MSM_PCIE_INT_GLOBAL_INT].num,
+				handle_global_irq,
+				IRQF_TRIGGER_RISING,
+				dev->irq[MSM_PCIE_INT_GLOBAL_INT].name,
+				dev);
+		if (rc) {
+			PCIE_ERR(dev,
+				"PCIe: RC%d: Unable to request global_int interrupt: %d\n",
+				dev->rc_idx,
+				dev->irq[MSM_PCIE_INT_GLOBAL_INT].num);
+			return rc;
+		}
+	}
+
+	/* register handler for PCIE_WAKE_N interrupt line */
+	if (dev->wake_n) {
+		rc = devm_request_irq(pdev,
+				dev->wake_n, handle_wake_irq,
+				IRQF_TRIGGER_FALLING, "msm_pcie_wake", dev);
+		if (rc) {
+			PCIE_ERR(dev,
+				"PCIe: RC%d: Unable to request wake interrupt\n",
+				dev->rc_idx);
+			return rc;
+		}
+
+		INIT_WORK(&dev->handle_wake_work, handle_wake_func);
+
+		rc = enable_irq_wake(dev->wake_n);
+		if (rc) {
+			PCIE_ERR(dev,
+				"PCIe: RC%d: Unable to enable wake interrupt\n",
+				dev->rc_idx);
+			return rc;
+		}
+	}
+
+	/* Create a virtual domain of interrupts */
+	if (!dev->msi_gicm_addr) {
+		dev->irq_domain = irq_domain_add_linear(dev->pdev->dev.of_node,
+			PCIE_MSI_NR_IRQS, &msm_pcie_msi_ops, dev);
+
+		if (!dev->irq_domain) {
+			PCIE_ERR(dev,
+				"PCIe: RC%d: Unable to initialize irq domain\n",
+				dev->rc_idx);
+
+			if (dev->wake_n)
+				disable_irq(dev->wake_n);
+
+			/* irq_domain_add_linear() returns NULL on failure */
+			return -ENODEV;
+		}
+
+		irq_create_mapping(dev->irq_domain, 0);
+	}
+
+	return 0;
+}
+
+void msm_pcie_irq_deinit(struct msm_pcie_dev_t *dev)
+{
+	PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
+
+	wakeup_source_trash(&dev->ws);
+
+	if (dev->wake_n)
+		disable_irq(dev->wake_n);
+}
+
+static int msm_pcie_probe(struct platform_device *pdev)
+{
+	int ret = 0;
+	int rc_idx = -1;
+	int i, j;
+
+	PCIE_GEN_DBG("%s\n", __func__);
+
+	mutex_lock(&pcie_drv.drv_lock);
+
+	ret = of_property_read_u32((&pdev->dev)->of_node,
+				"cell-index", &rc_idx);
+	if (ret) {
+		PCIE_GEN_DBG("Did not find RC index.\n");
+		goto out;
+	} else {
+		if (rc_idx >= MAX_RC_NUM) {
+			pr_err(
+				"PCIe: Invalid RC Index %d (max supported = %d)\n",
+				rc_idx, MAX_RC_NUM);
+			goto out;
+		}
+		pcie_drv.rc_num++;
+		PCIE_DBG(&msm_pcie_dev[rc_idx], "PCIe: RC index is %d.\n",
+			rc_idx);
+	}
+
+	msm_pcie_dev[rc_idx].l0s_supported =
+		of_property_read_bool((&pdev->dev)->of_node,
+				"qcom,l0s-supported");
+	PCIE_DBG(&msm_pcie_dev[rc_idx], "L0s is %s supported.\n",
+		msm_pcie_dev[rc_idx].l0s_supported ? "" : "not");
+	msm_pcie_dev[rc_idx].l1_supported =
+		of_property_read_bool((&pdev->dev)->of_node,
+				"qcom,l1-supported");
+	PCIE_DBG(&msm_pcie_dev[rc_idx], "L1 is %s supported.\n",
+		msm_pcie_dev[rc_idx].l1_supported ? "" : "not");
+	msm_pcie_dev[rc_idx].l1ss_supported =
+		of_property_read_bool((&pdev->dev)->of_node,
+				"qcom,l1ss-supported");
+	PCIE_DBG(&msm_pcie_dev[rc_idx], "L1ss is %s supported.\n",
+		msm_pcie_dev[rc_idx].l1ss_supported ? "" : "not");
+	msm_pcie_dev[rc_idx].common_clk_en =
+		of_property_read_bool((&pdev->dev)->of_node,
+				"qcom,common-clk-en");
+	PCIE_DBG(&msm_pcie_dev[rc_idx], "Common clock is %s enabled.\n",
+		msm_pcie_dev[rc_idx].common_clk_en ? "" : "not");
+	msm_pcie_dev[rc_idx].clk_power_manage_en =
+		of_property_read_bool((&pdev->dev)->of_node,
+				"qcom,clk-power-manage-en");
+	PCIE_DBG(&msm_pcie_dev[rc_idx],
+		"Clock power management is %s enabled.\n",
+		msm_pcie_dev[rc_idx].clk_power_manage_en ? "" : "not");
+	msm_pcie_dev[rc_idx].aux_clk_sync =
+		of_property_read_bool((&pdev->dev)->of_node,
+				"qcom,aux-clk-sync");
+	PCIE_DBG(&msm_pcie_dev[rc_idx],
+		"AUX clock is %s synchronous to Core clock.\n",
+		msm_pcie_dev[rc_idx].aux_clk_sync ? "" : "not");
+
+	msm_pcie_dev[rc_idx].use_19p2mhz_aux_clk =
+		of_property_read_bool((&pdev->dev)->of_node,
+				"qcom,use-19p2mhz-aux-clk");
+	PCIE_DBG(&msm_pcie_dev[rc_idx],
+		"AUX clock frequency is %s 19.2MHz.\n",
+		msm_pcie_dev[rc_idx].use_19p2mhz_aux_clk ? "" : "not");
+
+	msm_pcie_dev[rc_idx].smmu_exist =
+		of_property_read_bool((&pdev->dev)->of_node,
+				"qcom,smmu-exist");
+	PCIE_DBG(&msm_pcie_dev[rc_idx],
+		"SMMU does %s exist.\n",
+		msm_pcie_dev[rc_idx].smmu_exist ? "" : "not");
+
+	msm_pcie_dev[rc_idx].smmu_sid_base = 0;
+	ret = of_property_read_u32((&pdev->dev)->of_node, "qcom,smmu-sid-base",
+				&msm_pcie_dev[rc_idx].smmu_sid_base);
+	if (ret)
+		PCIE_DBG(&msm_pcie_dev[rc_idx],
+			"RC%d SMMU sid base not found\n",
+			msm_pcie_dev[rc_idx].rc_idx);
+	else
+		PCIE_DBG(&msm_pcie_dev[rc_idx],
+			"RC%d: qcom,smmu-sid-base: 0x%x.\n",
+			msm_pcie_dev[rc_idx].rc_idx,
+			msm_pcie_dev[rc_idx].smmu_sid_base);
+
+	msm_pcie_dev[rc_idx].ep_wakeirq =
+		of_property_read_bool((&pdev->dev)->of_node,
+				"qcom,ep-wakeirq");
+	PCIE_DBG(&msm_pcie_dev[rc_idx],
+		"PCIe: EP of RC%d does %s assert wake when it is up.\n",
+		rc_idx, msm_pcie_dev[rc_idx].ep_wakeirq ? "" : "not");
+
+	msm_pcie_dev[rc_idx].phy_ver = 1;
+	ret = of_property_read_u32((&pdev->dev)->of_node,
+				"qcom,pcie-phy-ver",
+				&msm_pcie_dev[rc_idx].phy_ver);
+	if (ret)
+		PCIE_DBG(&msm_pcie_dev[rc_idx],
+			"RC%d: pcie-phy-ver does not exist.\n",
+			msm_pcie_dev[rc_idx].rc_idx);
+	else
+		PCIE_DBG(&msm_pcie_dev[rc_idx],
+			"RC%d: pcie-phy-ver: %d.\n",
+			msm_pcie_dev[rc_idx].rc_idx,
+			msm_pcie_dev[rc_idx].phy_ver);
+
+	msm_pcie_dev[rc_idx].n_fts = 0;
+	ret = of_property_read_u32((&pdev->dev)->of_node,
+				"qcom,n-fts",
+				&msm_pcie_dev[rc_idx].n_fts);
+
+	if (ret)
+		PCIE_DBG(&msm_pcie_dev[rc_idx],
+			"n-fts does not exist. ret=%d\n", ret);
+	else
+		PCIE_DBG(&msm_pcie_dev[rc_idx], "n-fts: 0x%x.\n",
+				msm_pcie_dev[rc_idx].n_fts);
+
+	msm_pcie_dev[rc_idx].common_phy =
+		of_property_read_bool((&pdev->dev)->of_node,
+				"qcom,common-phy");
+	PCIE_DBG(&msm_pcie_dev[rc_idx],
+		"PCIe: RC%d: Common PHY does %s exist.\n",
+		rc_idx, msm_pcie_dev[rc_idx].common_phy ? "" : "not");
+
+	msm_pcie_dev[rc_idx].ext_ref_clk =
+		of_property_read_bool((&pdev->dev)->of_node,
+				"qcom,ext-ref-clk");
+	PCIE_DBG(&msm_pcie_dev[rc_idx], "ref clk is %s.\n",
+		msm_pcie_dev[rc_idx].ext_ref_clk ? "external" : "internal");
+
+	msm_pcie_dev[rc_idx].ep_latency = 0;
+	ret = of_property_read_u32((&pdev->dev)->of_node,
+				"qcom,ep-latency",
+				&msm_pcie_dev[rc_idx].ep_latency);
+	if (ret)
+		PCIE_DBG(&msm_pcie_dev[rc_idx],
+			"RC%d: ep-latency does not exist.\n",
+			rc_idx);
+	else
+		PCIE_DBG(&msm_pcie_dev[rc_idx], "RC%d: ep-latency: 0x%x.\n",
+			rc_idx, msm_pcie_dev[rc_idx].ep_latency);
+
+	msm_pcie_dev[rc_idx].wr_halt_size = 0;
+	ret = of_property_read_u32(pdev->dev.of_node,
+				"qcom,wr-halt-size",
+				&msm_pcie_dev[rc_idx].wr_halt_size);
+	if (ret)
+		PCIE_DBG(&msm_pcie_dev[rc_idx],
+			"RC%d: wr-halt-size not specified in dt. Use default value.\n",
+			rc_idx);
+	else
+		PCIE_DBG(&msm_pcie_dev[rc_idx], "RC%d: wr-halt-size: 0x%x.\n",
+			rc_idx, msm_pcie_dev[rc_idx].wr_halt_size);
+
+	msm_pcie_dev[rc_idx].cpl_timeout = 0;
+	ret = of_property_read_u32((&pdev->dev)->of_node,
+				"qcom,cpl-timeout",
+				&msm_pcie_dev[rc_idx].cpl_timeout);
+	if (ret)
+		PCIE_DBG(&msm_pcie_dev[rc_idx],
+			"RC%d: Using default cpl-timeout.\n",
+			rc_idx);
+	else
+		PCIE_DBG(&msm_pcie_dev[rc_idx], "RC%d: cpl-timeout: 0x%x.\n",
+			rc_idx, msm_pcie_dev[rc_idx].cpl_timeout);
+
+	msm_pcie_dev[rc_idx].perst_delay_us_min =
+		PERST_PROPAGATION_DELAY_US_MIN;
+	ret = of_property_read_u32(pdev->dev.of_node,
+				"qcom,perst-delay-us-min",
+				&msm_pcie_dev[rc_idx].perst_delay_us_min);
+	if (ret)
+		PCIE_DBG(&msm_pcie_dev[rc_idx],
+			"RC%d: perst-delay-us-min does not exist. Use default value %dus.\n",
+			rc_idx, msm_pcie_dev[rc_idx].perst_delay_us_min);
+	else
+		PCIE_DBG(&msm_pcie_dev[rc_idx],
+			"RC%d: perst-delay-us-min: %dus.\n",
+			rc_idx, msm_pcie_dev[rc_idx].perst_delay_us_min);
+
+	msm_pcie_dev[rc_idx].perst_delay_us_max =
+		PERST_PROPAGATION_DELAY_US_MAX;
+	ret = of_property_read_u32(pdev->dev.of_node,
+				"qcom,perst-delay-us-max",
+				&msm_pcie_dev[rc_idx].perst_delay_us_max);
+	if (ret)
+		PCIE_DBG(&msm_pcie_dev[rc_idx],
+			"RC%d: perst-delay-us-max does not exist. Use default value %dus.\n",
+			rc_idx, msm_pcie_dev[rc_idx].perst_delay_us_max);
+	else
+		PCIE_DBG(&msm_pcie_dev[rc_idx],
+			"RC%d: perst-delay-us-max: %dus.\n",
+			rc_idx, msm_pcie_dev[rc_idx].perst_delay_us_max);
+
+	msm_pcie_dev[rc_idx].tlp_rd_size = PCIE_TLP_RD_SIZE;
+	ret = of_property_read_u32(pdev->dev.of_node,
+				"qcom,tlp-rd-size",
+				&msm_pcie_dev[rc_idx].tlp_rd_size);
+	if (ret)
+		PCIE_DBG(&msm_pcie_dev[rc_idx],
+			"RC%d: tlp-rd-size does not exist. tlp-rd-size: 0x%x.\n",
+			rc_idx, msm_pcie_dev[rc_idx].tlp_rd_size);
+	else
+		PCIE_DBG(&msm_pcie_dev[rc_idx], "RC%d: tlp-rd-size: 0x%x.\n",
+			rc_idx, msm_pcie_dev[rc_idx].tlp_rd_size);
+
+	msm_pcie_dev[rc_idx].msi_gicm_addr = 0;
+	msm_pcie_dev[rc_idx].msi_gicm_base = 0;
+	ret = of_property_read_u32((&pdev->dev)->of_node,
+				"qcom,msi-gicm-addr",
+				&msm_pcie_dev[rc_idx].msi_gicm_addr);
+
+	if (ret) {
+		PCIE_DBG(&msm_pcie_dev[rc_idx], "%s",
+			"msi-gicm-addr does not exist.\n");
+	} else {
+		PCIE_DBG(&msm_pcie_dev[rc_idx], "msi-gicm-addr: 0x%x.\n",
+				msm_pcie_dev[rc_idx].msi_gicm_addr);
+
+		ret = of_property_read_u32((&pdev->dev)->of_node,
+				"qcom,msi-gicm-base",
+				&msm_pcie_dev[rc_idx].msi_gicm_base);
+
+		if (ret) {
+			PCIE_ERR(&msm_pcie_dev[rc_idx],
+				"PCIe: RC%d: msi-gicm-base does not exist.\n",
+				rc_idx);
+			goto decrease_rc_num;
+		} else {
+			PCIE_DBG(&msm_pcie_dev[rc_idx], "msi-gicm-base: 0x%x\n",
+					msm_pcie_dev[rc_idx].msi_gicm_base);
+		}
+	}
+
+	msm_pcie_dev[rc_idx].scm_dev_id = 0;
+	ret = of_property_read_u32((&pdev->dev)->of_node,
+				"qcom,scm-dev-id",
+				&msm_pcie_dev[rc_idx].scm_dev_id);
+
+	msm_pcie_dev[rc_idx].rc_idx = rc_idx;
+	msm_pcie_dev[rc_idx].pdev = pdev;
+	msm_pcie_dev[rc_idx].vreg_n = 0;
+	msm_pcie_dev[rc_idx].gpio_n = 0;
+	msm_pcie_dev[rc_idx].parf_deemph = 0;
+	msm_pcie_dev[rc_idx].parf_swing = 0;
+	msm_pcie_dev[rc_idx].link_status = MSM_PCIE_LINK_DEINIT;
+	msm_pcie_dev[rc_idx].user_suspend = false;
+	msm_pcie_dev[rc_idx].disable_pc = false;
+	msm_pcie_dev[rc_idx].saved_state = NULL;
+	msm_pcie_dev[rc_idx].enumerated = false;
+	msm_pcie_dev[rc_idx].num_active_ep = 0;
+	msm_pcie_dev[rc_idx].num_ep = 0;
+	msm_pcie_dev[rc_idx].pending_ep_reg = false;
+	msm_pcie_dev[rc_idx].phy_len = 0;
+	msm_pcie_dev[rc_idx].port_phy_len = 0;
+	msm_pcie_dev[rc_idx].phy_sequence = NULL;
+	msm_pcie_dev[rc_idx].port_phy_sequence = NULL;
+	msm_pcie_dev[rc_idx].event_reg = NULL;
+	msm_pcie_dev[rc_idx].linkdown_counter = 0;
+	msm_pcie_dev[rc_idx].link_turned_on_counter = 0;
+	msm_pcie_dev[rc_idx].link_turned_off_counter = 0;
+	msm_pcie_dev[rc_idx].rc_corr_counter = 0;
+	msm_pcie_dev[rc_idx].rc_non_fatal_counter = 0;
+	msm_pcie_dev[rc_idx].rc_fatal_counter = 0;
+	msm_pcie_dev[rc_idx].ep_corr_counter = 0;
+	msm_pcie_dev[rc_idx].ep_non_fatal_counter = 0;
+	msm_pcie_dev[rc_idx].ep_fatal_counter = 0;
+	msm_pcie_dev[rc_idx].suspending = false;
+	msm_pcie_dev[rc_idx].wake_counter = 0;
+	msm_pcie_dev[rc_idx].aer_enable = true;
+	msm_pcie_dev[rc_idx].power_on = false;
+	msm_pcie_dev[rc_idx].current_short_bdf = 0;
+	msm_pcie_dev[rc_idx].use_msi = false;
+	msm_pcie_dev[rc_idx].use_pinctrl = false;
+	msm_pcie_dev[rc_idx].linkdown_panic = false;
+	msm_pcie_dev[rc_idx].bridge_found = false;
+	memcpy(msm_pcie_dev[rc_idx].vreg, msm_pcie_vreg_info,
+				sizeof(msm_pcie_vreg_info));
+	memcpy(msm_pcie_dev[rc_idx].gpio, msm_pcie_gpio_info,
+				sizeof(msm_pcie_gpio_info));
+	memcpy(msm_pcie_dev[rc_idx].clk, msm_pcie_clk_info[rc_idx],
+				sizeof(msm_pcie_clk_info[rc_idx]));
+	memcpy(msm_pcie_dev[rc_idx].pipeclk, msm_pcie_pipe_clk_info[rc_idx],
+				sizeof(msm_pcie_pipe_clk_info[rc_idx]));
+	memcpy(msm_pcie_dev[rc_idx].res, msm_pcie_res_info,
+				sizeof(msm_pcie_res_info));
+	memcpy(msm_pcie_dev[rc_idx].irq, msm_pcie_irq_info,
+				sizeof(msm_pcie_irq_info));
+	memcpy(msm_pcie_dev[rc_idx].msi, msm_pcie_msi_info,
+				sizeof(msm_pcie_msi_info));
+	memcpy(msm_pcie_dev[rc_idx].reset, msm_pcie_reset_info[rc_idx],
+				sizeof(msm_pcie_reset_info[rc_idx]));
+	memcpy(msm_pcie_dev[rc_idx].pipe_reset,
+			msm_pcie_pipe_reset_info[rc_idx],
+			sizeof(msm_pcie_pipe_reset_info[rc_idx]));
+	msm_pcie_dev[rc_idx].shadow_en = true;
+	for (i = 0; i < PCIE_CONF_SPACE_DW; i++)
+		msm_pcie_dev[rc_idx].rc_shadow[i] = PCIE_CLEAR;
+	for (i = 0; i < MAX_DEVICE_NUM; i++)
+		for (j = 0; j < PCIE_CONF_SPACE_DW; j++)
+			msm_pcie_dev[rc_idx].ep_shadow[i][j] = PCIE_CLEAR;
+	for (i = 0; i < MAX_DEVICE_NUM; i++) {
+		msm_pcie_dev[rc_idx].pcidev_table[i].bdf = 0;
+		msm_pcie_dev[rc_idx].pcidev_table[i].dev = NULL;
+		msm_pcie_dev[rc_idx].pcidev_table[i].short_bdf = 0;
+		msm_pcie_dev[rc_idx].pcidev_table[i].sid = 0;
+		msm_pcie_dev[rc_idx].pcidev_table[i].domain = rc_idx;
+		msm_pcie_dev[rc_idx].pcidev_table[i].conf_base = 0;
+		msm_pcie_dev[rc_idx].pcidev_table[i].phy_address = 0;
+		msm_pcie_dev[rc_idx].pcidev_table[i].dev_ctrlstts_offset = 0;
+		msm_pcie_dev[rc_idx].pcidev_table[i].event_reg = NULL;
+		msm_pcie_dev[rc_idx].pcidev_table[i].registered = true;
+	}
+
+	ret = msm_pcie_get_resources(&msm_pcie_dev[rc_idx],
+				msm_pcie_dev[rc_idx].pdev);
+
+	if (ret)
+		goto decrease_rc_num;
+
+	msm_pcie_dev[rc_idx].pinctrl = devm_pinctrl_get(&pdev->dev);
+	if (IS_ERR_OR_NULL(msm_pcie_dev[rc_idx].pinctrl))
+		PCIE_ERR(&msm_pcie_dev[rc_idx],
+			"PCIe: RC%d failed to get pinctrl\n",
+			rc_idx);
+	else
+		msm_pcie_dev[rc_idx].use_pinctrl = true;
+
+	if (msm_pcie_dev[rc_idx].use_pinctrl) {
+		msm_pcie_dev[rc_idx].pins_default =
+			pinctrl_lookup_state(msm_pcie_dev[rc_idx].pinctrl,
+						"default");
+		if (IS_ERR(msm_pcie_dev[rc_idx].pins_default)) {
+			PCIE_ERR(&msm_pcie_dev[rc_idx],
+				"PCIe: RC%d could not get pinctrl default state\n",
+				rc_idx);
+			msm_pcie_dev[rc_idx].pins_default = NULL;
+		}
+
+		msm_pcie_dev[rc_idx].pins_sleep =
+			pinctrl_lookup_state(msm_pcie_dev[rc_idx].pinctrl,
+						"sleep");
+		if (IS_ERR(msm_pcie_dev[rc_idx].pins_sleep)) {
+			PCIE_ERR(&msm_pcie_dev[rc_idx],
+				"PCIe: RC%d could not get pinctrl sleep state\n",
+				rc_idx);
+			msm_pcie_dev[rc_idx].pins_sleep = NULL;
+		}
+	}
+
+	ret = msm_pcie_gpio_init(&msm_pcie_dev[rc_idx]);
+	if (ret) {
+		msm_pcie_release_resources(&msm_pcie_dev[rc_idx]);
+		goto decrease_rc_num;
+	}
+
+	ret = msm_pcie_irq_init(&msm_pcie_dev[rc_idx]);
+	if (ret) {
+		msm_pcie_release_resources(&msm_pcie_dev[rc_idx]);
+		msm_pcie_gpio_deinit(&msm_pcie_dev[rc_idx]);
+		goto decrease_rc_num;
+	}
+
+	msm_pcie_dev[rc_idx].drv_ready = true;
+
+	if (msm_pcie_dev[rc_idx].ep_wakeirq) {
+		PCIE_DBG(&msm_pcie_dev[rc_idx],
+			"PCIe: RC%d will be enumerated upon WAKE signal from Endpoint.\n",
+			rc_idx);
+		mutex_unlock(&pcie_drv.drv_lock);
+		return 0;
+	}
+
+	ret = msm_pcie_enumerate(rc_idx);
+
+	if (ret)
+		PCIE_ERR(&msm_pcie_dev[rc_idx],
+			"PCIe: RC%d is not enabled during bootup; it will be enumerated upon client request.\n",
+			rc_idx);
+	else
+		PCIE_ERR(&msm_pcie_dev[rc_idx], "RC%d is enabled in bootup\n",
+			rc_idx);
+
+	PCIE_DBG(&msm_pcie_dev[rc_idx], "PCIE probed %s\n",
+		dev_name(&(pdev->dev)));
+
+	mutex_unlock(&pcie_drv.drv_lock);
+	return 0;
+
+decrease_rc_num:
+	pcie_drv.rc_num--;
+out:
+	if (rc_idx < 0 || rc_idx >= MAX_RC_NUM)
+		pr_err("PCIe: Invalid RC index %d. Driver probe failed\n",
+			rc_idx);
+	else
+		PCIE_ERR(&msm_pcie_dev[rc_idx],
+			"PCIe: Driver probe failed for RC%d:%d\n",
+			rc_idx, ret);
+
+	mutex_unlock(&pcie_drv.drv_lock);
+
+	return ret;
+}
+
+static int msm_pcie_remove(struct platform_device *pdev)
+{
+	int ret = 0;
+	int rc_idx;
+
+	PCIE_GEN_DBG("PCIe:%s.\n", __func__);
+
+	mutex_lock(&pcie_drv.drv_lock);
+
+	ret = of_property_read_u32((&pdev->dev)->of_node,
+				"cell-index", &rc_idx);
+	if (ret) {
+		pr_err("%s: Did not find RC index.\n", __func__);
+		goto out;
+	} else {
+		pcie_drv.rc_num--;
+		PCIE_GEN_DBG("%s: RC index is 0x%x.\n", __func__, rc_idx);
+	}
+
+	msm_pcie_irq_deinit(&msm_pcie_dev[rc_idx]);
+	msm_pcie_vreg_deinit(&msm_pcie_dev[rc_idx]);
+	msm_pcie_clk_deinit(&msm_pcie_dev[rc_idx]);
+	msm_pcie_gpio_deinit(&msm_pcie_dev[rc_idx]);
+	msm_pcie_release_resources(&msm_pcie_dev[rc_idx]);
+
+out:
+	mutex_unlock(&pcie_drv.drv_lock);
+
+	return ret;
+}
+
+static const struct of_device_id msm_pcie_match[] = {
+	{	.compatible = "qcom,pci-msm",
+	},
+	{}
+};
+
+static struct platform_driver msm_pcie_driver = {
+	.probe	= msm_pcie_probe,
+	.remove	= msm_pcie_remove,
+	.driver	= {
+		.name		= "pci-msm",
+		.owner		= THIS_MODULE,
+		.of_match_table	= msm_pcie_match,
+	},
+};
+
+int __init pcie_init(void)
+{
+	int ret = 0, i;
+	char rc_name[MAX_RC_NAME_LEN];
+
+	pr_alert("pcie:%s.\n", __func__);
+
+	pcie_drv.rc_num = 0;
+	mutex_init(&pcie_drv.drv_lock);
+	mutex_init(&com_phy_lock);
+
+	for (i = 0; i < MAX_RC_NUM; i++) {
+		snprintf(rc_name, MAX_RC_NAME_LEN, "pcie%d-short", i);
+		msm_pcie_dev[i].ipc_log =
+			ipc_log_context_create(PCIE_LOG_PAGES, rc_name, 0);
+		if (msm_pcie_dev[i].ipc_log == NULL)
+			pr_err("%s: unable to create IPC log context for %s\n",
+				__func__, rc_name);
+		else
+			PCIE_DBG(&msm_pcie_dev[i],
+				"PCIe IPC logging is enabled for RC%d\n",
+				i);
+		snprintf(rc_name, MAX_RC_NAME_LEN, "pcie%d-long", i);
+		msm_pcie_dev[i].ipc_log_long =
+			ipc_log_context_create(PCIE_LOG_PAGES, rc_name, 0);
+		if (msm_pcie_dev[i].ipc_log_long == NULL)
+			pr_err("%s: unable to create IPC log context for %s\n",
+				__func__, rc_name);
+		else
+			PCIE_DBG(&msm_pcie_dev[i],
+				"PCIe IPC logging %s is enabled for RC%d\n",
+				rc_name, i);
+		snprintf(rc_name, MAX_RC_NAME_LEN, "pcie%d-dump", i);
+		msm_pcie_dev[i].ipc_log_dump =
+			ipc_log_context_create(PCIE_LOG_PAGES, rc_name, 0);
+		if (msm_pcie_dev[i].ipc_log_dump == NULL)
+			pr_err("%s: unable to create IPC log context for %s\n",
+				__func__, rc_name);
+		else
+			PCIE_DBG(&msm_pcie_dev[i],
+				"PCIe IPC logging %s is enabled for RC%d\n",
+				rc_name, i);
+		spin_lock_init(&msm_pcie_dev[i].cfg_lock);
+		msm_pcie_dev[i].cfg_access = true;
+		mutex_init(&msm_pcie_dev[i].enumerate_lock);
+		mutex_init(&msm_pcie_dev[i].setup_lock);
+		mutex_init(&msm_pcie_dev[i].recovery_lock);
+		spin_lock_init(&msm_pcie_dev[i].linkdown_lock);
+		spin_lock_init(&msm_pcie_dev[i].wakeup_lock);
+		spin_lock_init(&msm_pcie_dev[i].global_irq_lock);
+		spin_lock_init(&msm_pcie_dev[i].aer_lock);
+		msm_pcie_dev[i].drv_ready = false;
+	}
+	for (i = 0; i < MAX_RC_NUM * MAX_DEVICE_NUM; i++) {
+		msm_pcie_dev_tbl[i].bdf = 0;
+		msm_pcie_dev_tbl[i].dev = NULL;
+		msm_pcie_dev_tbl[i].short_bdf = 0;
+		msm_pcie_dev_tbl[i].sid = 0;
+		msm_pcie_dev_tbl[i].domain = -1;
+		msm_pcie_dev_tbl[i].conf_base = 0;
+		msm_pcie_dev_tbl[i].phy_address = 0;
+		msm_pcie_dev_tbl[i].dev_ctrlstts_offset = 0;
+		msm_pcie_dev_tbl[i].event_reg = NULL;
+		msm_pcie_dev_tbl[i].registered = true;
+	}
+
+	msm_pcie_debugfs_init();
+
+	ret = platform_driver_register(&msm_pcie_driver);
+
+	return ret;
+}
+
+static void __exit pcie_exit(void)
+{
+	PCIE_GEN_DBG("pcie:%s.\n", __func__);
+
+	platform_driver_unregister(&msm_pcie_driver);
+
+	msm_pcie_debugfs_exit();
+}
+
+subsys_initcall_sync(pcie_init);
+module_exit(pcie_exit);
+
+/* The RC does not report the correct class; set it to PCI_CLASS_BRIDGE_PCI */
+static void msm_pcie_fixup_early(struct pci_dev *dev)
+{
+	struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
+
+	PCIE_DBG(pcie_dev, "hdr_type %d\n", dev->hdr_type);
+	if (dev->hdr_type == 1)
+		dev->class = (dev->class & 0xff) | (PCI_CLASS_BRIDGE_PCI << 8);
+}
+DECLARE_PCI_FIXUP_EARLY(PCIE_VENDOR_ID_RCP, PCIE_DEVICE_ID_RCP,
+			msm_pcie_fixup_early);
+
+/* Suspend the PCIe link */
+static int msm_pcie_pm_suspend(struct pci_dev *dev,
+			void *user, void *data, u32 options)
+{
+	int ret = 0;
+	u32 val = 0;
+	int ret_l23;
+	unsigned long irqsave_flags;
+	struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
+
+	PCIE_DBG(pcie_dev, "RC%d: entry\n", pcie_dev->rc_idx);
+
+	spin_lock_irqsave(&pcie_dev->aer_lock, irqsave_flags);
+	pcie_dev->suspending = true;
+	spin_unlock_irqrestore(&pcie_dev->aer_lock, irqsave_flags);
+
+	if (!pcie_dev->power_on) {
+		PCIE_DBG(pcie_dev,
+			"PCIe: power of RC%d has been turned off.\n",
+			pcie_dev->rc_idx);
+		return ret;
+	}
+
+	if (dev && !(options & MSM_PCIE_CONFIG_NO_CFG_RESTORE)
+		&& msm_pcie_confirm_linkup(pcie_dev, true, true,
+			pcie_dev->conf)) {
+		ret = pci_save_state(dev);
+		pcie_dev->saved_state =	pci_store_saved_state(dev);
+	}
+	if (ret) {
+		PCIE_ERR(pcie_dev, "PCIe: failed to save state of RC%d: %d.\n",
+			pcie_dev->rc_idx, ret);
+		pcie_dev->suspending = false;
+		return ret;
+	}
+
+	spin_lock_irqsave(&pcie_dev->cfg_lock,
+				pcie_dev->irqsave_flags);
+	pcie_dev->cfg_access = false;
+	spin_unlock_irqrestore(&pcie_dev->cfg_lock,
+				pcie_dev->irqsave_flags);
+
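+	/* send PME_Turn_Off so the link can enter L23 before powering down */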
+	msm_pcie_write_mask(pcie_dev->elbi + PCIE20_ELBI_SYS_CTRL, 0,
+				BIT(4));
+
+	PCIE_DBG(pcie_dev, "RC%d: PME_TURNOFF_MSG is sent out\n",
+		pcie_dev->rc_idx);
+
+	ret_l23 = readl_poll_timeout((pcie_dev->parf
+		+ PCIE20_PARF_PM_STTS), val, (val & BIT(5)), 10000, 100000);
+
+	/* check L23_Ready */
+	PCIE_DBG(pcie_dev, "RC%d: PCIE20_PARF_PM_STTS is 0x%x.\n",
+		pcie_dev->rc_idx,
+		readl_relaxed(pcie_dev->parf + PCIE20_PARF_PM_STTS));
+	if (!ret_l23)
+		PCIE_DBG(pcie_dev, "RC%d: PM_Enter_L23 is received\n",
+			pcie_dev->rc_idx);
+	else
+		PCIE_DBG(pcie_dev, "RC%d: PM_Enter_L23 is NOT received\n",
+			pcie_dev->rc_idx);
+
+	msm_pcie_disable(pcie_dev, PM_PIPE_CLK | PM_CLK | PM_VREG);
+
+	if (pcie_dev->use_pinctrl && pcie_dev->pins_sleep)
+		pinctrl_select_state(pcie_dev->pinctrl,
+					pcie_dev->pins_sleep);
+
+	PCIE_DBG(pcie_dev, "RC%d: exit\n", pcie_dev->rc_idx);
+
+	return ret;
+}
+
+static void msm_pcie_fixup_suspend(struct pci_dev *dev)
+{
+	int ret;
+	struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
+
+	PCIE_DBG(pcie_dev, "RC%d\n", pcie_dev->rc_idx);
+
+	if (pcie_dev->link_status != MSM_PCIE_LINK_ENABLED)
+		return;
+
+	spin_lock_irqsave(&pcie_dev->cfg_lock,
+				pcie_dev->irqsave_flags);
+	if (pcie_dev->disable_pc) {
+		PCIE_DBG(pcie_dev,
+			"RC%d: Skip suspend because of user request\n",
+			pcie_dev->rc_idx);
+		spin_unlock_irqrestore(&pcie_dev->cfg_lock,
+				pcie_dev->irqsave_flags);
+		return;
+	}
+	spin_unlock_irqrestore(&pcie_dev->cfg_lock,
+				pcie_dev->irqsave_flags);
+
+	mutex_lock(&pcie_dev->recovery_lock);
+
+	ret = msm_pcie_pm_suspend(dev, NULL, NULL, 0);
+	if (ret)
+		PCIE_ERR(pcie_dev, "PCIe: RC%d got failure in suspend:%d.\n",
+			pcie_dev->rc_idx, ret);
+
+	mutex_unlock(&pcie_dev->recovery_lock);
+}
+DECLARE_PCI_FIXUP_SUSPEND(PCIE_VENDOR_ID_RCP, PCIE_DEVICE_ID_RCP,
+			  msm_pcie_fixup_suspend);
+
+/* Resume the PCIe link */
+static int msm_pcie_pm_resume(struct pci_dev *dev,
+			void *user, void *data, u32 options)
+{
+	int ret;
+	struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
+
+	PCIE_DBG(pcie_dev, "RC%d: entry\n", pcie_dev->rc_idx);
+
+	if (pcie_dev->use_pinctrl && pcie_dev->pins_default)
+		pinctrl_select_state(pcie_dev->pinctrl,
+					pcie_dev->pins_default);
+
+	spin_lock_irqsave(&pcie_dev->cfg_lock,
+				pcie_dev->irqsave_flags);
+	pcie_dev->cfg_access = true;
+	spin_unlock_irqrestore(&pcie_dev->cfg_lock,
+				pcie_dev->irqsave_flags);
+
+	ret = msm_pcie_enable(pcie_dev, PM_PIPE_CLK | PM_CLK | PM_VREG);
+	if (ret) {
+		PCIE_ERR(pcie_dev,
+			"PCIe: RC%d failed to enable PCIe link in resume.\n",
+			pcie_dev->rc_idx);
+		return ret;
+	}
+
+	pcie_dev->suspending = false;
+	PCIE_DBG(pcie_dev,
+		"dev->bus->number = %d dev->bus->primary = %d\n",
+		 dev->bus->number, dev->bus->primary);
+
+	if (!(options & MSM_PCIE_CONFIG_NO_CFG_RESTORE)) {
+		PCIE_DBG(pcie_dev,
+			"RC%d: entry of PCI framework restore state\n",
+			pcie_dev->rc_idx);
+
+		pci_load_and_free_saved_state(dev,
+				&pcie_dev->saved_state);
+		pci_restore_state(dev);
+
+		PCIE_DBG(pcie_dev,
+			"RC%d: exit of PCI framework restore state\n",
+			pcie_dev->rc_idx);
+	}
+
+	if (pcie_dev->bridge_found) {
+		PCIE_DBG(pcie_dev,
+			"RC%d: entry of PCIe recover config\n",
+			pcie_dev->rc_idx);
+
+		msm_pcie_recover_config(dev);
+
+		PCIE_DBG(pcie_dev,
+			"RC%d: exit of PCIe recover config\n",
+			pcie_dev->rc_idx);
+	}
+
+	PCIE_DBG(pcie_dev, "RC%d: exit\n", pcie_dev->rc_idx);
+
+	return ret;
+}
+
+void msm_pcie_fixup_resume(struct pci_dev *dev)
+{
+	int ret;
+	struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
+
+	PCIE_DBG(pcie_dev, "RC%d\n", pcie_dev->rc_idx);
+
+	if ((pcie_dev->link_status != MSM_PCIE_LINK_DISABLED) ||
+		pcie_dev->user_suspend)
+		return;
+
+	mutex_lock(&pcie_dev->recovery_lock);
+	ret = msm_pcie_pm_resume(dev, NULL, NULL, 0);
+	if (ret)
+		PCIE_ERR(pcie_dev,
+			"PCIe: RC%d got failure in fixup resume:%d.\n",
+			pcie_dev->rc_idx, ret);
+
+	mutex_unlock(&pcie_dev->recovery_lock);
+}
+DECLARE_PCI_FIXUP_RESUME(PCIE_VENDOR_ID_RCP, PCIE_DEVICE_ID_RCP,
+				 msm_pcie_fixup_resume);
+
+void msm_pcie_fixup_resume_early(struct pci_dev *dev)
+{
+	int ret;
+	struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
+
+	PCIE_DBG(pcie_dev, "RC%d\n", pcie_dev->rc_idx);
+
+	if ((pcie_dev->link_status != MSM_PCIE_LINK_DISABLED) ||
+		pcie_dev->user_suspend)
+		return;
+
+	mutex_lock(&pcie_dev->recovery_lock);
+	ret = msm_pcie_pm_resume(dev, NULL, NULL, 0);
+	if (ret)
+		PCIE_ERR(pcie_dev, "PCIe: RC%d got failure in resume:%d.\n",
+			pcie_dev->rc_idx, ret);
+
+	mutex_unlock(&pcie_dev->recovery_lock);
+}
+DECLARE_PCI_FIXUP_RESUME_EARLY(PCIE_VENDOR_ID_RCP, PCIE_DEVICE_ID_RCP,
+				 msm_pcie_fixup_resume_early);
+
+int msm_pcie_pm_control(enum msm_pcie_pm_opt pm_opt, u32 busnr, void *user,
+			void *data, u32 options)
+{
+	int i, ret = 0;
+	struct pci_dev *dev;
+	u32 rc_idx = 0;
+	struct msm_pcie_dev_t *pcie_dev;
+
+	PCIE_GEN_DBG("PCIe: pm_opt:%d;busnr:%d;options:%d\n",
+		pm_opt, busnr, options);
+
+	if (!user) {
+		pr_err("PCIe: endpoint device is NULL\n");
+		ret = -ENODEV;
+		goto out;
+	}
+
+	pcie_dev = PCIE_BUS_PRIV_DATA(((struct pci_dev *)user)->bus);
+
+	if (pcie_dev) {
+		rc_idx = pcie_dev->rc_idx;
+		PCIE_DBG(pcie_dev,
+			"PCIe: RC%d: pm_opt:%d;busnr:%d;options:%d\n",
+			rc_idx, pm_opt, busnr, options);
+	} else {
+		pr_err("PCIe: did not find RC for pci endpoint device.\n");
+		ret = -ENODEV;
+		goto out;
+	}
+
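+	/* a busnr of 0 means the caller did not specify a bus; skip the check */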
+	for (i = 0; i < MAX_DEVICE_NUM; i++) {
+		if (!busnr)
+			break;
+		if (user == pcie_dev->pcidev_table[i].dev) {
+			if (busnr == pcie_dev->pcidev_table[i].bdf >> 24)
+				break;
+
+			PCIE_ERR(pcie_dev,
+				"PCIe: RC%d: bus number %d does not match with the expected value %d\n",
+				pcie_dev->rc_idx, busnr,
+				pcie_dev->pcidev_table[i].bdf >> 24);
+			ret = MSM_PCIE_ERROR;
+			goto out;
+		}
+	}
+
+	if (i == MAX_DEVICE_NUM) {
+		PCIE_ERR(pcie_dev,
+			"PCIe: RC%d: endpoint device was not found in device table\n",
+			pcie_dev->rc_idx);
+		ret = MSM_PCIE_ERROR;
+		goto out;
+	}
+
+	dev = msm_pcie_dev[rc_idx].dev;
+
+	if (!msm_pcie_dev[rc_idx].drv_ready) {
+		PCIE_ERR(&msm_pcie_dev[rc_idx],
+			"RC%d has not been successfully probed yet\n",
+			rc_idx);
+		return -EPROBE_DEFER;
+	}
+
+	switch (pm_opt) {
+	case MSM_PCIE_SUSPEND:
+		PCIE_DBG(&msm_pcie_dev[rc_idx],
+			"User of RC%d requests to suspend the link\n", rc_idx);
+		if (msm_pcie_dev[rc_idx].link_status != MSM_PCIE_LINK_ENABLED)
+			PCIE_DBG(&msm_pcie_dev[rc_idx],
+				"PCIe: RC%d: requested to suspend when link is not enabled:%d.\n",
+				rc_idx, msm_pcie_dev[rc_idx].link_status);
+
+		if (!msm_pcie_dev[rc_idx].power_on) {
+			PCIE_ERR(&msm_pcie_dev[rc_idx],
+				"PCIe: RC%d: requested to suspend when link is powered down:%d.\n",
+				rc_idx, msm_pcie_dev[rc_idx].link_status);
+			break;
+		}
+
+		if (msm_pcie_dev[rc_idx].pending_ep_reg) {
+			PCIE_DBG(&msm_pcie_dev[rc_idx],
+				"PCIe: RC%d: request to suspend the link is rejected\n",
+				rc_idx);
+			break;
+		}
+
+		if (pcie_dev->num_active_ep) {
+			PCIE_DBG(pcie_dev,
+				"RC%d: an EP requested to suspend the link, but other EPs are still active: %d\n",
+				pcie_dev->rc_idx, pcie_dev->num_active_ep);
+			return ret;
+		}
+
+		msm_pcie_dev[rc_idx].user_suspend = true;
+
+		mutex_lock(&msm_pcie_dev[rc_idx].recovery_lock);
+
+		ret = msm_pcie_pm_suspend(dev, user, data, options);
+		if (ret) {
+			PCIE_ERR(&msm_pcie_dev[rc_idx],
+				"PCIe: RC%d: user failed to suspend the link.\n",
+				rc_idx);
+			msm_pcie_dev[rc_idx].user_suspend = false;
+		}
+
+		mutex_unlock(&msm_pcie_dev[rc_idx].recovery_lock);
+		break;
+	case MSM_PCIE_RESUME:
+		PCIE_DBG(&msm_pcie_dev[rc_idx],
+			"User of RC%d requests to resume the link\n", rc_idx);
+		if (msm_pcie_dev[rc_idx].link_status !=
+					MSM_PCIE_LINK_DISABLED) {
+			PCIE_ERR(&msm_pcie_dev[rc_idx],
+				"PCIe: RC%d: requested to resume when link is not disabled:%d. Number of active EP(s): %d\n",
+				rc_idx, msm_pcie_dev[rc_idx].link_status,
+				msm_pcie_dev[rc_idx].num_active_ep);
+			break;
+		}
+
+		mutex_lock(&msm_pcie_dev[rc_idx].recovery_lock);
+		ret = msm_pcie_pm_resume(dev, user, data, options);
+		if (ret) {
+			PCIE_ERR(&msm_pcie_dev[rc_idx],
+				"PCIe: RC%d: user failed to resume the link.\n",
+				rc_idx);
+		} else {
+			PCIE_DBG(&msm_pcie_dev[rc_idx],
+				"PCIe: RC%d: user succeeded to resume the link.\n",
+				rc_idx);
+
+			msm_pcie_dev[rc_idx].user_suspend = false;
+		}
+
+		mutex_unlock(&msm_pcie_dev[rc_idx].recovery_lock);
+
+		break;
+	case MSM_PCIE_DISABLE_PC:
+		PCIE_DBG(&msm_pcie_dev[rc_idx],
+			"User of RC%d requests to keep the link always alive.\n",
+			rc_idx);
+		spin_lock_irqsave(&msm_pcie_dev[rc_idx].cfg_lock,
+				msm_pcie_dev[rc_idx].irqsave_flags);
+		if (msm_pcie_dev[rc_idx].suspending) {
+			PCIE_ERR(&msm_pcie_dev[rc_idx],
+				"PCIe: RC%d Link has been suspended before request\n",
+				rc_idx);
+			ret = MSM_PCIE_ERROR;
+		} else {
+			msm_pcie_dev[rc_idx].disable_pc = true;
+		}
+		spin_unlock_irqrestore(&msm_pcie_dev[rc_idx].cfg_lock,
+				msm_pcie_dev[rc_idx].irqsave_flags);
+		break;
+	case MSM_PCIE_ENABLE_PC:
+		PCIE_DBG(&msm_pcie_dev[rc_idx],
+			"User of RC%d cancels the request to keep the link alive.\n",
+			rc_idx);
+		spin_lock_irqsave(&msm_pcie_dev[rc_idx].cfg_lock,
+				msm_pcie_dev[rc_idx].irqsave_flags);
+		msm_pcie_dev[rc_idx].disable_pc = false;
+		spin_unlock_irqrestore(&msm_pcie_dev[rc_idx].cfg_lock,
+				msm_pcie_dev[rc_idx].irqsave_flags);
+		break;
+	default:
+		PCIE_ERR(&msm_pcie_dev[rc_idx],
+			"PCIe: RC%d: unsupported pm operation:%d.\n",
+			rc_idx, pm_opt);
+		ret = -ENODEV;
+		goto out;
+	}
+
+out:
+	return ret;
+}
+EXPORT_SYMBOL(msm_pcie_pm_control);
+
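+/*
+ * Usage sketch (illustrative only, not part of this patch): a client
+ * driver that owns an endpoint on this RC could gate the link roughly
+ * as below, assuming the enum and prototype above are exposed through
+ * the public msm_pcie header:
+ *
+ *	ret = msm_pcie_pm_control(MSM_PCIE_SUSPEND, pdev->bus->number,
+ *				pdev, NULL, 0);
+ *	...
+ *	ret = msm_pcie_pm_control(MSM_PCIE_RESUME, pdev->bus->number,
+ *				pdev, NULL, 0);
+ */
+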
+int msm_pcie_register_event(struct msm_pcie_register_event *reg)
+{
+	int i, ret = 0;
+	struct msm_pcie_dev_t *pcie_dev;
+
+	if (!reg) {
+		pr_err("PCIe: Event registration is NULL\n");
+		return -ENODEV;
+	}
+
+	if (!reg->user) {
+		pr_err("PCIe: User of event registration is NULL\n");
+		return -ENODEV;
+	}
+
+	pcie_dev = PCIE_BUS_PRIV_DATA(((struct pci_dev *)reg->user)->bus);
+
+	if (!pcie_dev) {
+		pr_err("PCIe: did not find RC for pci endpoint device.\n");
+		return -ENODEV;
+	}
+
+	if (pcie_dev->num_ep > 1) {
+		for (i = 0; i < MAX_DEVICE_NUM; i++) {
+			if (reg->user ==
+				pcie_dev->pcidev_table[i].dev) {
+				pcie_dev->event_reg =
+					pcie_dev->pcidev_table[i].event_reg;
+
+				if (!pcie_dev->event_reg) {
+					pcie_dev->pcidev_table[i].registered =
+						true;
+
+					pcie_dev->num_active_ep++;
+					PCIE_DBG(pcie_dev,
+						"PCIe: RC%d: number of active EP(s): %d.\n",
+						pcie_dev->rc_idx,
+						pcie_dev->num_active_ep);
+				}
+
+				pcie_dev->event_reg = reg;
+				pcie_dev->pcidev_table[i].event_reg = reg;
+				PCIE_DBG(pcie_dev,
+					"Event 0x%x is registered for RC %d\n",
+					reg->events,
+					pcie_dev->rc_idx);
+
+				break;
+			}
+		}
+
+		if (pcie_dev->pending_ep_reg) {
+			for (i = 0; i < MAX_DEVICE_NUM; i++)
+				if (!pcie_dev->pcidev_table[i].registered)
+					break;
+
+			if (i == MAX_DEVICE_NUM)
+				pcie_dev->pending_ep_reg = false;
+		}
+	} else {
+		pcie_dev->event_reg = reg;
+		PCIE_DBG(pcie_dev,
+			"Event 0x%x is registered for RC %d\n", reg->events,
+			pcie_dev->rc_idx);
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(msm_pcie_register_event);
+
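+/*
+ * Usage sketch (illustrative only): registering for a linkdown
+ * notification.  Only the .user and .events fields are referenced in
+ * this file; the .callback member and the MSM_PCIE_EVENT_LINKDOWN flag
+ * below are assumptions about the public msm_pcie header:
+ *
+ *	static struct msm_pcie_register_event pcie_event;
+ *
+ *	pcie_event.events = MSM_PCIE_EVENT_LINKDOWN;
+ *	pcie_event.user = pdev;
+ *	pcie_event.callback = client_linkdown_cb;
+ *	ret = msm_pcie_register_event(&pcie_event);
+ *	...
+ *	msm_pcie_deregister_event(&pcie_event);
+ */
+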
+int msm_pcie_deregister_event(struct msm_pcie_register_event *reg)
+{
+	int i, ret = 0;
+	struct msm_pcie_dev_t *pcie_dev;
+
+	if (!reg) {
+		pr_err("PCIe: Event deregistration is NULL\n");
+		return -ENODEV;
+	}
+
+	if (!reg->user) {
+		pr_err("PCIe: User of event deregistration is NULL\n");
+		return -ENODEV;
+	}
+
+	pcie_dev = PCIE_BUS_PRIV_DATA(((struct pci_dev *)reg->user)->bus);
+
+	if (!pcie_dev) {
+		pr_err("PCIe: did not find RC for pci endpoint device.\n");
+		return -ENODEV;
+	}
+
+	if (pcie_dev->num_ep > 1) {
+		for (i = 0; i < MAX_DEVICE_NUM; i++) {
+			if (reg->user == pcie_dev->pcidev_table[i].dev) {
+				if (pcie_dev->pcidev_table[i].event_reg) {
+					pcie_dev->num_active_ep--;
+					PCIE_DBG(pcie_dev,
+						"PCIe: RC%d: number of active EP(s) left: %d.\n",
+						pcie_dev->rc_idx,
+						pcie_dev->num_active_ep);
+				}
+
+				pcie_dev->event_reg = NULL;
+				pcie_dev->pcidev_table[i].event_reg = NULL;
+				PCIE_DBG(pcie_dev,
+					"Event is deregistered for RC %d\n",
+					pcie_dev->rc_idx);
+
+				break;
+			}
+		}
+	} else {
+		pcie_dev->event_reg = NULL;
+		PCIE_DBG(pcie_dev, "Event is deregistered for RC %d\n",
+				pcie_dev->rc_idx);
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(msm_pcie_deregister_event);
+
+int msm_pcie_recover_config(struct pci_dev *dev)
+{
+	int ret = 0;
+	struct msm_pcie_dev_t *pcie_dev;
+
+	if (dev) {
+		pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
+		PCIE_DBG(pcie_dev,
+			"Recovery for the link of RC%d\n", pcie_dev->rc_idx);
+	} else {
+		pr_err("PCIe: the input pci dev is NULL.\n");
+		return -ENODEV;
+	}
+
+	if (msm_pcie_confirm_linkup(pcie_dev, true, true, pcie_dev->conf)) {
+		PCIE_DBG(pcie_dev,
+			"Recover config space of RC%d and its EP\n",
+			pcie_dev->rc_idx);
+		pcie_dev->shadow_en = false;
+		PCIE_DBG(pcie_dev, "Recover RC%d\n", pcie_dev->rc_idx);
+		msm_pcie_cfg_recover(pcie_dev, true);
+		PCIE_DBG(pcie_dev, "Recover EP of RC%d\n", pcie_dev->rc_idx);
+		msm_pcie_cfg_recover(pcie_dev, false);
+		PCIE_DBG(pcie_dev,
+			"Refreshing the saved config space in PCI framework for RC%d and its EP\n",
+			pcie_dev->rc_idx);
+		pci_save_state(pcie_dev->dev);
+		pci_save_state(dev);
+		pcie_dev->shadow_en = true;
+		PCIE_DBG(pcie_dev, "Turn on shadow for RC%d\n",
+			pcie_dev->rc_idx);
+	} else {
+		PCIE_ERR(pcie_dev,
+			"PCIe: the link of RC%d is not up yet; can't recover config space.\n",
+			pcie_dev->rc_idx);
+		ret = -ENODEV;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(msm_pcie_recover_config);
+
+int msm_pcie_shadow_control(struct pci_dev *dev, bool enable)
+{
+	int ret = 0;
+	struct msm_pcie_dev_t *pcie_dev;
+
+	if (dev) {
+		pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
+		PCIE_DBG(pcie_dev,
+			"User requests to %s shadow\n",
+			enable ? "enable" : "disable");
+	} else {
+		pr_err("PCIe: the input pci dev is NULL.\n");
+		return -ENODEV;
+	}
+
+	PCIE_DBG(pcie_dev,
+		"Shadowing of RC%d is currently %s.\n",
+		pcie_dev->rc_idx, pcie_dev->shadow_en ? "enabled" : "disabled");
+
+	pcie_dev->shadow_en = enable;
+
+	PCIE_DBG(pcie_dev,
+		"Shadowing of RC%d is turned %s upon user's request.\n",
+		pcie_dev->rc_idx, enable ? "on" : "off");
+
+	return ret;
+}
+EXPORT_SYMBOL(msm_pcie_shadow_control);
diff --git a/drivers/platform/msm/Kconfig b/drivers/platform/msm/Kconfig
index ac9545e..a08b100 100644
--- a/drivers/platform/msm/Kconfig
+++ b/drivers/platform/msm/Kconfig
@@ -65,4 +65,32 @@
 	  into suites according to the sub-unit of the IPA being tested.
 	  The user interface to run and control the tests is debugfs file
 	  system.
+
+config SPS
+	bool "SPS support"
+	select GENERIC_ALLOCATOR
+	help
+	  The SPS (Smart Peripheral Switch) is a DMA engine.
+	  It can move data in the following modes:
+		1. Peripheral-to-Peripheral.
+		2. Peripheral-to-Memory.
+		3. Memory-to-Memory.
+
+config SPS_SUPPORT_BAMDMA
+	bool "SPS support BAM DMA"
+	depends on SPS
+	default n
+	help
+	  The BAM-DMA is used for Memory-to-Memory transfers.
+	  The main use case is RPC between processors.
+	  The BAM-DMA hardware has 2 registers sets:
+		1. A BAM HW like all the peripherals.
+		2. A DMA channel configuration (i.e. channel priority).
+
+config SPS_SUPPORT_NDP_BAM
+	bool "SPS support NDP BAM"
+	depends on SPS
+	default n
+	help
+	  No-Data-Path BAM is used to improve BAM performance.
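+
+# Example (illustrative): a defconfig fragment enabling SPS together
+# with NDP BAM support:
+#   CONFIG_SPS=y
+#   CONFIG_SPS_SUPPORT_NDP_BAM=y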
 endmenu
diff --git a/drivers/platform/msm/Makefile b/drivers/platform/msm/Makefile
index 1f9e11b..7eebfcb5 100644
--- a/drivers/platform/msm/Makefile
+++ b/drivers/platform/msm/Makefile
@@ -4,3 +4,4 @@
 obj-$(CONFIG_GSI) += gsi/
 obj-$(CONFIG_IPA) += ipa/
 obj-$(CONFIG_IPA3) += ipa/
+obj-$(CONFIG_SPS) += sps/
diff --git a/drivers/platform/msm/sps/Makefile b/drivers/platform/msm/sps/Makefile
new file mode 100644
index 0000000..f19e162
--- /dev/null
+++ b/drivers/platform/msm/sps/Makefile
@@ -0,0 +1,2 @@
+obj-y += bam.o sps_bam.o sps.o sps_dma.o sps_map.o sps_mem.o sps_rm.o
+
diff --git a/drivers/platform/msm/sps/bam.c b/drivers/platform/msm/sps/bam.c
new file mode 100644
index 0000000..8d8af1b
--- /dev/null
+++ b/drivers/platform/msm/sps/bam.c
@@ -0,0 +1,2346 @@
+/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/* Bus-Access-Manager (BAM) Hardware manager. */
+
+#include <linux/types.h>	/* u32 */
+#include <linux/kernel.h>	/* pr_info() */
+#include <linux/io.h>		/* ioread32() */
+#include <linux/bitops.h>	/* find_first_bit() */
+#include <linux/errno.h>	/* ENODEV */
+#include <linux/memory.h>
+
+#include "bam.h"
+#include "sps_bam.h"
+
+/**
+ *  Valid BAM Hardware version.
+ *
+ */
+#define BAM_MIN_VERSION 2
+#define BAM_MAX_VERSION 0x2f
+
+#ifdef CONFIG_SPS_SUPPORT_NDP_BAM
+
+/* Maximum number of execution environments */
+#define BAM_MAX_EES 8
+
+/**
+ *  BAM Hardware registers bitmask.
+ *  format: <register>_<field>
+ *
+ */
+/* CTRL */
+#define BAM_MESS_ONLY_CANCEL_WB               0x100000
+#define CACHE_MISS_ERR_RESP_EN                 0x80000
+#define LOCAL_CLK_GATING                       0x60000
+#define IBC_DISABLE                            0x10000
+#define BAM_CACHED_DESC_STORE                   0x8000
+#define BAM_DESC_CACHE_SEL                      0x6000
+#define BAM_EN_ACCUM                              0x10
+#define BAM_EN                                     0x2
+#define BAM_SW_RST                                 0x1
+
+/* REVISION */
+#define BAM_INACTIV_TMR_BASE                0xff000000
+#define BAM_CMD_DESC_EN                       0x800000
+#define BAM_DESC_CACHE_DEPTH                  0x600000
+#define BAM_NUM_INACTIV_TMRS                  0x100000
+#define BAM_INACTIV_TMRS_EXST                  0x80000
+#define BAM_HIGH_FREQUENCY_BAM                 0x40000
+#define BAM_HAS_NO_BYPASS                      0x20000
+#define BAM_SECURED                            0x10000
+#define BAM_USE_VMIDMT                          0x8000
+#define BAM_AXI_ACTIVE                          0x4000
+#define BAM_CE_BUFFER_SIZE                      0x3000
+#define BAM_NUM_EES                              0xf00
+#define BAM_REVISION                              0xff
+
+/* SW_REVISION */
+#define BAM_MAJOR                           0xf0000000
+#define BAM_MINOR                            0xfff0000
+#define BAM_STEP                                0xffff
+
+/* NUM_PIPES */
+#define BAM_NON_PIPE_GRP                    0xff000000
+#define BAM_PERIPH_NON_PIPE_GRP               0xff0000
+#define BAM_DATA_ADDR_BUS_WIDTH                 0xc000
+#define BAM_NUM_PIPES                             0xff
+
+/* TIMER */
+#define BAM_TIMER                               0xffff
+
+/* TIMER_CTRL */
+#define TIMER_RST                           0x80000000
+#define TIMER_RUN                           0x40000000
+#define TIMER_MODE                          0x20000000
+#define TIMER_TRSHLD                            0xffff
+
+/* DESC_CNT_TRSHLD */
+#define BAM_DESC_CNT_TRSHLD                     0xffff
+
+/* IRQ_SRCS */
+#define BAM_IRQ                         0x80000000
+#define P_IRQ                           0x7fffffff
+
+/* IRQ_STTS */
+#define IRQ_STTS_BAM_TIMER_IRQ                         0x10
+#define IRQ_STTS_BAM_EMPTY_IRQ                          0x8
+#define IRQ_STTS_BAM_ERROR_IRQ                          0x4
+#define IRQ_STTS_BAM_HRESP_ERR_IRQ                      0x2
+
+/* IRQ_CLR */
+#define IRQ_CLR_BAM_TIMER_IRQ                          0x10
+#define IRQ_CLR_BAM_EMPTY_CLR                           0x8
+#define IRQ_CLR_BAM_ERROR_CLR                           0x4
+#define IRQ_CLR_BAM_HRESP_ERR_CLR                       0x2
+
+/* IRQ_EN */
+#define IRQ_EN_BAM_TIMER_IRQ                           0x10
+#define IRQ_EN_BAM_EMPTY_EN                             0x8
+#define IRQ_EN_BAM_ERROR_EN                             0x4
+#define IRQ_EN_BAM_HRESP_ERR_EN                         0x2
+
+/* AHB_MASTER_ERR_CTRLS */
+#define AHB_MASTER_ERR_CTRLS_BAM_ERR_HVMID         0x7c0000
+#define AHB_MASTER_ERR_CTRLS_BAM_ERR_DIRECT_MODE    0x20000
+#define AHB_MASTER_ERR_CTRLS_BAM_ERR_HCID           0x1f000
+#define AHB_MASTER_ERR_CTRLS_BAM_ERR_HPROT            0xf00
+#define AHB_MASTER_ERR_CTRLS_BAM_ERR_HBURST            0xe0
+#define AHB_MASTER_ERR_CTRLS_BAM_ERR_HSIZE             0x18
+#define AHB_MASTER_ERR_CTRLS_BAM_ERR_HWRITE             0x4
+#define AHB_MASTER_ERR_CTRLS_BAM_ERR_HTRANS             0x3
+
+/* TRUST_REG  */
+#define LOCK_EE_CTRL                            0x2000
+#define BAM_VMID                                0x1f00
+#define BAM_RST_BLOCK                             0x80
+#define BAM_EE                                     0x7
+
+/* TEST_BUS_SEL */
+#define BAM_SW_EVENTS_ZERO                    0x200000
+#define BAM_SW_EVENTS_SEL                     0x180000
+#define BAM_DATA_ERASE                         0x40000
+#define BAM_DATA_FLUSH                         0x20000
+#define BAM_CLK_ALWAYS_ON                      0x10000
+#define BAM_TESTBUS_SEL                           0x7f
+
+/* CNFG_BITS */
+#define CNFG_BITS_AOS_OVERFLOW_PRVNT		 0x80000000
+#define CNFG_BITS_MULTIPLE_EVENTS_DESC_AVAIL_EN  0x40000000
+#define CNFG_BITS_MULTIPLE_EVENTS_SIZE_EN        0x20000000
+#define CNFG_BITS_BAM_ZLT_W_CD_SUPPORT           0x10000000
+#define CNFG_BITS_BAM_CD_ENABLE                   0x8000000
+#define CNFG_BITS_BAM_AU_ACCUMED                  0x4000000
+#define CNFG_BITS_BAM_PSM_P_HD_DATA               0x2000000
+#define CNFG_BITS_BAM_REG_P_EN                    0x1000000
+#define CNFG_BITS_BAM_WB_DSC_AVL_P_RST             0x800000
+#define CNFG_BITS_BAM_WB_RETR_SVPNT                0x400000
+#define CNFG_BITS_BAM_WB_CSW_ACK_IDL               0x200000
+#define CNFG_BITS_BAM_WB_BLK_CSW                   0x100000
+#define CNFG_BITS_BAM_WB_P_RES                      0x80000
+#define CNFG_BITS_BAM_SI_P_RES                      0x40000
+#define CNFG_BITS_BAM_AU_P_RES                      0x20000
+#define CNFG_BITS_BAM_PSM_P_RES                     0x10000
+#define CNFG_BITS_BAM_PSM_CSW_REQ                    0x8000
+#define CNFG_BITS_BAM_SB_CLK_REQ                     0x4000
+#define CNFG_BITS_BAM_IBC_DISABLE                    0x2000
+#define CNFG_BITS_BAM_NO_EXT_P_RST                   0x1000
+#define CNFG_BITS_BAM_FULL_PIPE                       0x800
+#define CNFG_BITS_BAM_PIPE_CNFG                         0x4
+
+/* PIPE_ATTR_EEn*/
+#define BAM_ENABLED                              0x80000000
+#define P_ATTR                                   0x7fffffff
+
+/* P_ctrln */
+#define P_LOCK_GROUP                          0x1f0000
+#define P_WRITE_NWD                              0x800
+#define P_PREFETCH_LIMIT                         0x600
+#define P_AUTO_EOB_SEL                           0x180
+#define P_AUTO_EOB                                0x40
+#define P_SYS_MODE                                0x20
+#define P_SYS_STRM                                0x10
+#define P_DIRECTION                                0x8
+#define P_EN                                       0x2
+
+/* P_RSTn */
+#define P_RST_P_SW_RST                             0x1
+
+/* P_HALTn */
+#define P_HALT_P_PIPE_EMPTY			   0x8
+#define P_HALT_P_LAST_DESC_ZLT                     0x4
+#define P_HALT_P_PROD_HALTED                       0x2
+#define P_HALT_P_HALT                              0x1
+
+/* P_TRUST_REGn */
+#define BAM_P_VMID                              0x1f00
+#define BAM_P_SUP_GROUP                           0xf8
+#define BAM_P_EE                                   0x7
+
+/* P_IRQ_STTSn */
+#define P_IRQ_STTS_P_HRESP_ERR_IRQ                0x80
+#define P_IRQ_STTS_P_PIPE_RST_ERR_IRQ             0x40
+#define P_IRQ_STTS_P_TRNSFR_END_IRQ               0x20
+#define P_IRQ_STTS_P_ERR_IRQ                      0x10
+#define P_IRQ_STTS_P_OUT_OF_DESC_IRQ               0x8
+#define P_IRQ_STTS_P_WAKE_IRQ                      0x4
+#define P_IRQ_STTS_P_TIMER_IRQ                     0x2
+#define P_IRQ_STTS_P_PRCSD_DESC_IRQ                0x1
+
+/* P_IRQ_CLRn */
+#define P_IRQ_CLR_P_HRESP_ERR_CLR                 0x80
+#define P_IRQ_CLR_P_PIPE_RST_ERR_CLR              0x40
+#define P_IRQ_CLR_P_TRNSFR_END_CLR                0x20
+#define P_IRQ_CLR_P_ERR_CLR                       0x10
+#define P_IRQ_CLR_P_OUT_OF_DESC_CLR                0x8
+#define P_IRQ_CLR_P_WAKE_CLR                       0x4
+#define P_IRQ_CLR_P_TIMER_CLR                      0x2
+#define P_IRQ_CLR_P_PRCSD_DESC_CLR                 0x1
+
+/* P_IRQ_ENn */
+#define P_IRQ_EN_P_HRESP_ERR_EN                   0x80
+#define P_IRQ_EN_P_PIPE_RST_ERR_EN                0x40
+#define P_IRQ_EN_P_TRNSFR_END_EN                  0x20
+#define P_IRQ_EN_P_ERR_EN                         0x10
+#define P_IRQ_EN_P_OUT_OF_DESC_EN                  0x8
+#define P_IRQ_EN_P_WAKE_EN                         0x4
+#define P_IRQ_EN_P_TIMER_EN                        0x2
+#define P_IRQ_EN_P_PRCSD_DESC_EN                   0x1
+
+/* P_TIMERn */
+#define P_TIMER_P_TIMER                         0xffff
+
+/* P_TIMER_ctrln */
+#define P_TIMER_RST                         0x80000000
+#define P_TIMER_RUN                         0x40000000
+#define P_TIMER_MODE                        0x20000000
+#define P_TIMER_TRSHLD                          0xffff
+
+/* P_PRDCR_SDBNDn */
+#define P_PRDCR_SDBNDn_BAM_P_SB_UPDATED      0x1000000
+#define P_PRDCR_SDBNDn_BAM_P_TOGGLE           0x100000
+#define P_PRDCR_SDBNDn_BAM_P_CTRL              0xf0000
+#define P_PRDCR_SDBNDn_BAM_P_BYTES_FREE         0xffff
+
+/* P_CNSMR_SDBNDn */
+#define P_CNSMR_SDBNDn_BAM_P_SB_UPDATED      0x1000000
+#define P_CNSMR_SDBNDn_BAM_P_WAIT_4_ACK       0x800000
+#define P_CNSMR_SDBNDn_BAM_P_ACK_TOGGLE       0x400000
+#define P_CNSMR_SDBNDn_BAM_P_ACK_TOGGLE_R     0x200000
+#define P_CNSMR_SDBNDn_BAM_P_TOGGLE           0x100000
+#define P_CNSMR_SDBNDn_BAM_P_CTRL              0xf0000
+#define P_CNSMR_SDBNDn_BAM_P_BYTES_AVAIL        0xffff
+
+/* P_EVNT_regn */
+#define P_BYTES_CONSUMED                    0xffff0000
+#define P_DESC_FIFO_PEER_OFST                   0xffff
+
+/* P_SW_ofstsn */
+#define SW_OFST_IN_DESC                     0xffff0000
+#define SW_DESC_OFST                            0xffff
+
+/* P_EVNT_GEN_TRSHLDn */
+#define P_EVNT_GEN_TRSHLD_P_TRSHLD              0xffff
+
+/* P_FIFO_sizesn */
+#define P_DATA_FIFO_SIZE                    0xffff0000
+#define P_DESC_FIFO_SIZE                        0xffff
+
+#define P_RETR_CNTXT_RETR_DESC_OFST            0xffff0000
+#define P_RETR_CNTXT_RETR_OFST_IN_DESC             0xffff
+#define P_SI_CNTXT_SI_DESC_OFST                    0xffff
+#define P_DF_CNTXT_WB_ACCUMULATED              0xffff0000
+#define P_DF_CNTXT_DF_DESC_OFST                    0xffff
+#define P_AU_PSM_CNTXT_1_AU_PSM_ACCUMED        0xffff0000
+#define P_AU_PSM_CNTXT_1_AU_ACKED                  0xffff
+#define P_PSM_CNTXT_2_PSM_DESC_VALID           0x80000000
+#define P_PSM_CNTXT_2_PSM_DESC_IRQ             0x40000000
+#define P_PSM_CNTXT_2_PSM_DESC_IRQ_DONE        0x20000000
+#define P_PSM_CNTXT_2_PSM_GENERAL_BITS         0x1e000000
+#define P_PSM_CNTXT_2_PSM_CONS_STATE            0x1c00000
+#define P_PSM_CNTXT_2_PSM_PROD_SYS_STATE         0x380000
+#define P_PSM_CNTXT_2_PSM_PROD_B2B_STATE          0x70000
+#define P_PSM_CNTXT_2_PSM_DESC_SIZE                0xffff
+#define P_PSM_CNTXT_4_PSM_DESC_OFST            0xffff0000
+#define P_PSM_CNTXT_4_PSM_SAVED_ACCUMED_SIZE       0xffff
+#define P_PSM_CNTXT_5_PSM_BLOCK_BYTE_CNT       0xffff0000
+#define P_PSM_CNTXT_5_PSM_OFST_IN_DESC             0xffff
+
+#else
+
+/* Maximum number of execution environments */
+#define BAM_MAX_EES 4
+
+/**
+ *  BAM Hardware registers bitmask.
+ *  format: <register>_<field>
+ *
+ */
+/* CTRL */
+#define IBC_DISABLE                            0x10000
+#define BAM_CACHED_DESC_STORE                   0x8000
+#define BAM_DESC_CACHE_SEL                      0x6000
+/* BAM_PERIPH_IRQ_SIC_SEL is an obsolete field; this bit is now reserved */
+#define BAM_PERIPH_IRQ_SIC_SEL                  0x1000
+#define BAM_EN_ACCUM                              0x10
+#define BAM_EN                                     0x2
+#define BAM_SW_RST                                 0x1
+
+/* REVISION */
+#define BAM_INACTIV_TMR_BASE                0xff000000
+#define BAM_INACTIV_TMRS_EXST                  0x80000
+#define BAM_HIGH_FREQUENCY_BAM                 0x40000
+#define BAM_HAS_NO_BYPASS                      0x20000
+#define BAM_SECURED                            0x10000
+#define BAM_NUM_EES                              0xf00
+#define BAM_REVISION                              0xff
+
+/* NUM_PIPES */
+#define BAM_NON_PIPE_GRP                    0xff000000
+#define BAM_PERIPH_NON_PIPE_GRP               0xff0000
+#define BAM_DATA_ADDR_BUS_WIDTH                 0xc000
+#define BAM_NUM_PIPES                             0xff
+
+/* DESC_CNT_TRSHLD */
+#define BAM_DESC_CNT_TRSHLD                     0xffff
+
+/* IRQ_SRCS */
+#define BAM_IRQ                         0x80000000
+#define P_IRQ                           0x7fffffff
+
+#define IRQ_STTS_BAM_EMPTY_IRQ                          0x8
+#define IRQ_STTS_BAM_ERROR_IRQ                          0x4
+#define IRQ_STTS_BAM_HRESP_ERR_IRQ                      0x2
+#define IRQ_CLR_BAM_EMPTY_CLR                           0x8
+#define IRQ_CLR_BAM_ERROR_CLR                           0x4
+#define IRQ_CLR_BAM_HRESP_ERR_CLR                       0x2
+#define IRQ_EN_BAM_EMPTY_EN                             0x8
+#define IRQ_EN_BAM_ERROR_EN                             0x4
+#define IRQ_EN_BAM_HRESP_ERR_EN                         0x2
+#define IRQ_SIC_SEL_BAM_IRQ_SIC_SEL              0x80000000
+#define IRQ_SIC_SEL_P_IRQ_SIC_SEL                0x7fffffff
+#define AHB_MASTER_ERR_CTRLS_BAM_ERR_HVMID         0x7c0000
+#define AHB_MASTER_ERR_CTRLS_BAM_ERR_DIRECT_MODE    0x20000
+#define AHB_MASTER_ERR_CTRLS_BAM_ERR_HCID           0x1f000
+#define AHB_MASTER_ERR_CTRLS_BAM_ERR_HPROT            0xf00
+#define AHB_MASTER_ERR_CTRLS_BAM_ERR_HBURST            0xe0
+#define AHB_MASTER_ERR_CTRLS_BAM_ERR_HSIZE             0x18
+#define AHB_MASTER_ERR_CTRLS_BAM_ERR_HWRITE             0x4
+#define AHB_MASTER_ERR_CTRLS_BAM_ERR_HTRANS             0x3
+#define CNFG_BITS_BAM_AU_ACCUMED                  0x4000000
+#define CNFG_BITS_BAM_PSM_P_HD_DATA               0x2000000
+#define CNFG_BITS_BAM_REG_P_EN                    0x1000000
+#define CNFG_BITS_BAM_WB_DSC_AVL_P_RST             0x800000
+#define CNFG_BITS_BAM_WB_RETR_SVPNT                0x400000
+#define CNFG_BITS_BAM_WB_CSW_ACK_IDL               0x200000
+#define CNFG_BITS_BAM_WB_BLK_CSW                   0x100000
+#define CNFG_BITS_BAM_WB_P_RES                      0x80000
+#define CNFG_BITS_BAM_SI_P_RES                      0x40000
+#define CNFG_BITS_BAM_AU_P_RES                      0x20000
+#define CNFG_BITS_BAM_PSM_P_RES                     0x10000
+#define CNFG_BITS_BAM_PSM_CSW_REQ                    0x8000
+#define CNFG_BITS_BAM_SB_CLK_REQ                     0x4000
+#define CNFG_BITS_BAM_IBC_DISABLE                    0x2000
+#define CNFG_BITS_BAM_NO_EXT_P_RST                   0x1000
+#define CNFG_BITS_BAM_FULL_PIPE                       0x800
+#define CNFG_BITS_BAM_PIPE_CNFG                         0x4
+
+/* TEST_BUS_SEL */
+#define BAM_DATA_ERASE                         0x40000
+#define BAM_DATA_FLUSH                         0x20000
+#define BAM_CLK_ALWAYS_ON                      0x10000
+#define BAM_TESTBUS_SEL                           0x7f
+
+/* TRUST_REG  */
+#define BAM_VMID                                0x1f00
+#define BAM_RST_BLOCK                             0x80
+#define BAM_EE                                     0x3
+
+/* P_TRUST_REGn */
+#define BAM_P_VMID                              0x1f00
+#define BAM_P_EE                                   0x3
+
+/* P_PRDCR_SDBNDn */
+#define P_PRDCR_SDBNDn_BAM_P_SB_UPDATED      0x1000000
+#define P_PRDCR_SDBNDn_BAM_P_TOGGLE           0x100000
+#define P_PRDCR_SDBNDn_BAM_P_CTRL              0xf0000
+#define P_PRDCR_SDBNDn_BAM_P_BYTES_FREE         0xffff
+/* P_CNSMR_SDBNDn */
+#define P_CNSMR_SDBNDn_BAM_P_SB_UPDATED      0x1000000
+#define P_CNSMR_SDBNDn_BAM_P_WAIT_4_ACK       0x800000
+#define P_CNSMR_SDBNDn_BAM_P_ACK_TOGGLE       0x400000
+#define P_CNSMR_SDBNDn_BAM_P_ACK_TOGGLE_R     0x200000
+#define P_CNSMR_SDBNDn_BAM_P_TOGGLE           0x100000
+#define P_CNSMR_SDBNDn_BAM_P_CTRL              0xf0000
+#define P_CNSMR_SDBNDn_BAM_P_BYTES_AVAIL        0xffff
+
+/* P_ctrln */
+#define P_PREFETCH_LIMIT                         0x600
+#define P_AUTO_EOB_SEL                           0x180
+#define P_AUTO_EOB                                0x40
+#define P_SYS_MODE                             0x20
+#define P_SYS_STRM                             0x10
+#define P_DIRECTION                             0x8
+#define P_EN                                    0x2
+
+#define P_RST_P_SW_RST                                 0x1
+
+#define P_HALT_P_PROD_HALTED                           0x2
+#define P_HALT_P_HALT                                  0x1
+
+#define P_IRQ_STTS_P_TRNSFR_END_IRQ                   0x20
+#define P_IRQ_STTS_P_ERR_IRQ                          0x10
+#define P_IRQ_STTS_P_OUT_OF_DESC_IRQ                   0x8
+#define P_IRQ_STTS_P_WAKE_IRQ                          0x4
+#define P_IRQ_STTS_P_TIMER_IRQ                         0x2
+#define P_IRQ_STTS_P_PRCSD_DESC_IRQ                    0x1
+
+#define P_IRQ_CLR_P_TRNSFR_END_CLR                    0x20
+#define P_IRQ_CLR_P_ERR_CLR                           0x10
+#define P_IRQ_CLR_P_OUT_OF_DESC_CLR                    0x8
+#define P_IRQ_CLR_P_WAKE_CLR                           0x4
+#define P_IRQ_CLR_P_TIMER_CLR                          0x2
+#define P_IRQ_CLR_P_PRCSD_DESC_CLR                     0x1
+
+#define P_IRQ_EN_P_TRNSFR_END_EN                      0x20
+#define P_IRQ_EN_P_ERR_EN                             0x10
+#define P_IRQ_EN_P_OUT_OF_DESC_EN                      0x8
+#define P_IRQ_EN_P_WAKE_EN                             0x4
+#define P_IRQ_EN_P_TIMER_EN                            0x2
+#define P_IRQ_EN_P_PRCSD_DESC_EN                       0x1
+
+#define P_TIMER_P_TIMER                             0xffff
+
+/* P_TIMER_ctrln */
+#define P_TIMER_RST                0x80000000
+#define P_TIMER_RUN                0x40000000
+#define P_TIMER_MODE               0x20000000
+#define P_TIMER_TRSHLD                 0xffff
+
+/* P_EVNT_regn */
+#define P_BYTES_CONSUMED             0xffff0000
+#define P_DESC_FIFO_PEER_OFST            0xffff
+
+/* P_SW_ofstsn */
+#define SW_OFST_IN_DESC              0xffff0000
+#define SW_DESC_OFST                     0xffff
+
+#define P_EVNT_GEN_TRSHLD_P_TRSHLD                  0xffff
+
+/* P_FIFO_sizesn */
+#define P_DATA_FIFO_SIZE           0xffff0000
+#define P_DESC_FIFO_SIZE               0xffff
+
+#define P_RETR_CNTXT_RETR_DESC_OFST            0xffff0000
+#define P_RETR_CNTXT_RETR_OFST_IN_DESC             0xffff
+#define P_SI_CNTXT_SI_DESC_OFST                    0xffff
+#define P_AU_PSM_CNTXT_1_AU_PSM_ACCUMED        0xffff0000
+#define P_AU_PSM_CNTXT_1_AU_ACKED                  0xffff
+#define P_PSM_CNTXT_2_PSM_DESC_VALID           0x80000000
+#define P_PSM_CNTXT_2_PSM_DESC_IRQ             0x40000000
+#define P_PSM_CNTXT_2_PSM_DESC_IRQ_DONE        0x20000000
+#define P_PSM_CNTXT_2_PSM_GENERAL_BITS         0x1e000000
+#define P_PSM_CNTXT_2_PSM_CONS_STATE            0x1c00000
+#define P_PSM_CNTXT_2_PSM_PROD_SYS_STATE         0x380000
+#define P_PSM_CNTXT_2_PSM_PROD_B2B_STATE          0x70000
+#define P_PSM_CNTXT_2_PSM_DESC_SIZE                0xffff
+#define P_PSM_CNTXT_4_PSM_DESC_OFST            0xffff0000
+#define P_PSM_CNTXT_4_PSM_SAVED_ACCUMED_SIZE       0xffff
+#define P_PSM_CNTXT_5_PSM_BLOCK_BYTE_CNT       0xffff0000
+#define P_PSM_CNTXT_5_PSM_OFST_IN_DESC             0xffff
+#endif
+
+#define BAM_ERROR   (-1)
+
+enum bam_regs {
+	CTRL,
+	REVISION,
+	SW_REVISION,
+	NUM_PIPES,
+	TIMER,
+	TIMER_CTRL,
+	DESC_CNT_TRSHLD,
+	IRQ_SRCS,
+	IRQ_SRCS_MSK,
+	IRQ_SRCS_UNMASKED,
+	IRQ_STTS,
+	IRQ_CLR,
+	IRQ_EN,
+	IRQ_SIC_SEL,
+	AHB_MASTER_ERR_CTRLS,
+	AHB_MASTER_ERR_ADDR,
+	AHB_MASTER_ERR_ADDR_MSB,
+	AHB_MASTER_ERR_DATA,
+	IRQ_DEST,
+	PERIPH_IRQ_DEST,
+	TRUST_REG,
+	TEST_BUS_SEL,
+	TEST_BUS_REG,
+	CNFG_BITS,
+	IRQ_SRCS_EE,
+	IRQ_SRCS_MSK_EE,
+	IRQ_SRCS_UNMASKED_EE,
+	PIPE_ATTR_EE,
+	P_CTRL,
+	P_RST,
+	P_HALT,
+	P_IRQ_STTS,
+	P_IRQ_CLR,
+	P_IRQ_EN,
+	P_TIMER,
+	P_TIMER_CTRL,
+	P_PRDCR_SDBND,
+	P_CNSMR_SDBND,
+	P_EVNT_DEST_ADDR,
+	P_EVNT_DEST_ADDR_MSB,
+	P_EVNT_REG,
+	P_SW_OFSTS,
+	P_DATA_FIFO_ADDR,
+	P_DATA_FIFO_ADDR_MSB,
+	P_DESC_FIFO_ADDR,
+	P_DESC_FIFO_ADDR_MSB,
+	P_EVNT_GEN_TRSHLD,
+	P_FIFO_SIZES,
+	P_IRQ_DEST_ADDR,
+	P_RETR_CNTXT,
+	P_SI_CNTXT,
+	P_DF_CNTXT,
+	P_AU_PSM_CNTXT_1,
+	P_PSM_CNTXT_2,
+	P_PSM_CNTXT_3,
+	P_PSM_CNTXT_3_MSB,
+	P_PSM_CNTXT_4,
+	P_PSM_CNTXT_5,
+	P_TRUST_REG,
+	BAM_MAX_REGS,
+};
+
+static u32 bam_regmap[][BAM_MAX_REGS] = {
+	{ /* LEGACY BAM*/
+			[CTRL] = 0xf80,
+			[REVISION] = 0xf84,
+			[NUM_PIPES] = 0xfbc,
+			[DESC_CNT_TRSHLD] = 0xf88,
+			[IRQ_SRCS] = 0xf8c,
+			[IRQ_SRCS_MSK] = 0xf90,
+			[IRQ_SRCS_UNMASKED] = 0xfb0,
+			[IRQ_STTS] = 0xf94,
+			[IRQ_CLR] = 0xf98,
+			[IRQ_EN] = 0xf9c,
+			[IRQ_SIC_SEL] = 0xfa0,
+			[AHB_MASTER_ERR_CTRLS] = 0xfa4,
+			[AHB_MASTER_ERR_ADDR] = 0xfa8,
+			[AHB_MASTER_ERR_DATA] = 0xfac,
+			[IRQ_DEST] = 0xfb4,
+			[PERIPH_IRQ_DEST] = 0xfb8,
+			[TRUST_REG] = 0xff0,
+			[TEST_BUS_SEL] = 0xff4,
+			[TEST_BUS_REG] = 0xff8,
+			[CNFG_BITS] = 0xffc,
+			[IRQ_SRCS_EE] = 0x1800,
+			[IRQ_SRCS_MSK_EE] = 0x1804,
+			[IRQ_SRCS_UNMASKED_EE] = 0x1808,
+			[P_CTRL] = 0x0,
+			[P_RST] = 0x4,
+			[P_HALT] = 0x8,
+			[P_IRQ_STTS] = 0x10,
+			[P_IRQ_CLR] = 0x14,
+			[P_IRQ_EN] = 0x18,
+			[P_TIMER] = 0x1c,
+			[P_TIMER_CTRL] = 0x20,
+			[P_PRDCR_SDBND] = 0x24,
+			[P_CNSMR_SDBND] = 0x28,
+			[P_EVNT_DEST_ADDR] = 0x102c,
+			[P_EVNT_REG] = 0x1018,
+			[P_SW_OFSTS] = 0x1000,
+			[P_DATA_FIFO_ADDR] = 0x1024,
+			[P_DESC_FIFO_ADDR] = 0x101c,
+			[P_EVNT_GEN_TRSHLD] = 0x1028,
+			[P_FIFO_SIZES] = 0x1020,
+			[P_IRQ_DEST_ADDR] = 0x103c,
+			[P_RETR_CNTXT] = 0x1034,
+			[P_SI_CNTXT] = 0x1038,
+			[P_AU_PSM_CNTXT_1] = 0x1004,
+			[P_PSM_CNTXT_2] = 0x1008,
+			[P_PSM_CNTXT_3] = 0x100c,
+			[P_PSM_CNTXT_4] = 0x1010,
+			[P_PSM_CNTXT_5] = 0x1014,
+			[P_TRUST_REG] = 0x30,
+	},
+	{ /* NDP BAM */
+			[CTRL] = 0x0,
+			[REVISION] = 0x4,
+			[SW_REVISION] = 0x80,
+			[NUM_PIPES] = 0x3c,
+			[TIMER] = 0x40,
+			[TIMER_CTRL] = 0x44,
+			[DESC_CNT_TRSHLD] = 0x8,
+			[IRQ_SRCS] = 0xc,
+			[IRQ_SRCS_MSK] = 0x10,
+			[IRQ_SRCS_UNMASKED] = 0x30,
+			[IRQ_STTS] = 0x14,
+			[IRQ_CLR] = 0x18,
+			[IRQ_EN] = 0x1c,
+			[AHB_MASTER_ERR_CTRLS] = 0x24,
+			[AHB_MASTER_ERR_ADDR] = 0x28,
+			[AHB_MASTER_ERR_ADDR_MSB] = 0x104,
+			[AHB_MASTER_ERR_DATA] = 0x2c,
+			[TRUST_REG] = 0x70,
+			[TEST_BUS_SEL] = 0x74,
+			[TEST_BUS_REG] = 0x78,
+			[CNFG_BITS] = 0x7c,
+			[IRQ_SRCS_EE] = 0x800,
+			[IRQ_SRCS_MSK_EE] = 0x804,
+			[IRQ_SRCS_UNMASKED_EE] = 0x808,
+			[PIPE_ATTR_EE] = 0x80c,
+			[P_CTRL] = 0x1000,
+			[P_RST] = 0x1004,
+			[P_HALT] = 0x1008,
+			[P_IRQ_STTS] = 0x1010,
+			[P_IRQ_CLR] = 0x1014,
+			[P_IRQ_EN] = 0x1018,
+			[P_TIMER] = 0x101c,
+			[P_TIMER_CTRL] = 0x1020,
+			[P_PRDCR_SDBND] = 0x1024,
+			[P_CNSMR_SDBND] = 0x1028,
+			[P_EVNT_DEST_ADDR] = 0x182c,
+			[P_EVNT_DEST_ADDR_MSB] = 0x1934,
+			[P_EVNT_REG] = 0x1818,
+			[P_SW_OFSTS] = 0x1800,
+			[P_DATA_FIFO_ADDR] = 0x1824,
+			[P_DATA_FIFO_ADDR_MSB] = 0x1924,
+			[P_DESC_FIFO_ADDR] = 0x181c,
+			[P_DESC_FIFO_ADDR_MSB] = 0x1914,
+			[P_EVNT_GEN_TRSHLD] = 0x1828,
+			[P_FIFO_SIZES] = 0x1820,
+			[P_RETR_CNTXT] = 0x1834,
+			[P_SI_CNTXT] = 0x1838,
+			[P_DF_CNTXT] = 0x1830,
+			[P_AU_PSM_CNTXT_1] = 0x1804,
+			[P_PSM_CNTXT_2] = 0x1808,
+			[P_PSM_CNTXT_3] = 0x180c,
+			[P_PSM_CNTXT_3_MSB] = 0x1904,
+			[P_PSM_CNTXT_4] = 0x1810,
+			[P_PSM_CNTXT_5] = 0x1814,
+			[P_TRUST_REG] = 0x1030,
+	},
+	{ /* 4K OFFSETs*/
+			[CTRL] = 0x0,
+			[REVISION] = 0x1000,
+			[SW_REVISION] = 0x1004,
+			[NUM_PIPES] = 0x1008,
+			[TIMER] = 0x40,
+			[TIMER_CTRL] = 0x44,
+			[DESC_CNT_TRSHLD] = 0x8,
+			[IRQ_SRCS] = 0x3010,
+			[IRQ_SRCS_MSK] = 0x3014,
+			[IRQ_SRCS_UNMASKED] = 0x3018,
+			[IRQ_STTS] = 0x14,
+			[IRQ_CLR] = 0x18,
+			[IRQ_EN] = 0x1c,
+			[AHB_MASTER_ERR_CTRLS] = 0x1024,
+			[AHB_MASTER_ERR_ADDR] = 0x1028,
+			[AHB_MASTER_ERR_ADDR_MSB] = 0x1104,
+			[AHB_MASTER_ERR_DATA] = 0x102c,
+			[TRUST_REG] = 0x2000,
+			[TEST_BUS_SEL] = 0x1010,
+			[TEST_BUS_REG] = 0x1014,
+			[CNFG_BITS] = 0x7c,
+			[IRQ_SRCS_EE] = 0x3000,
+			[IRQ_SRCS_MSK_EE] = 0x3004,
+			[IRQ_SRCS_UNMASKED_EE] = 0x3008,
+			[PIPE_ATTR_EE] = 0x300c,
+			[P_CTRL] = 0x13000,
+			[P_RST] = 0x13004,
+			[P_HALT] = 0x13008,
+			[P_IRQ_STTS] = 0x13010,
+			[P_IRQ_CLR] = 0x13014,
+			[P_IRQ_EN] = 0x13018,
+			[P_TIMER] = 0x1301c,
+			[P_TIMER_CTRL] = 0x13020,
+			[P_PRDCR_SDBND] = 0x13024,
+			[P_CNSMR_SDBND] = 0x13028,
+			[P_EVNT_DEST_ADDR] = 0x1382c,
+			[P_EVNT_DEST_ADDR_MSB] = 0x13934,
+			[P_EVNT_REG] = 0x13818,
+			[P_SW_OFSTS] = 0x13800,
+			[P_DATA_FIFO_ADDR] = 0x13824,
+			[P_DATA_FIFO_ADDR_MSB] = 0x13924,
+			[P_DESC_FIFO_ADDR] = 0x1381c,
+			[P_DESC_FIFO_ADDR_MSB] = 0x13914,
+			[P_EVNT_GEN_TRSHLD] = 0x13828,
+			[P_FIFO_SIZES] = 0x13820,
+			[P_RETR_CNTXT] = 0x13834,
+			[P_SI_CNTXT] = 0x13838,
+			[P_DF_CNTXT] = 0x13830,
+			[P_AU_PSM_CNTXT_1] = 0x13804,
+			[P_PSM_CNTXT_2] = 0x13808,
+			[P_PSM_CNTXT_3] = 0x1380c,
+			[P_PSM_CNTXT_3_MSB] = 0x13904,
+			[P_PSM_CNTXT_4] = 0x13810,
+			[P_PSM_CNTXT_5] = 0x13814,
+			[P_TRUST_REG] = 0x2020,
+	},
+};
+
+/* BAM global non-secure reset control */
+enum bam_nonsecure_reset {
+	BAM_NONSECURE_RESET_ENABLE  = 0,
+	BAM_NONSECURE_RESET_DISABLE = 1,
+};
+
+static inline u32 bam_get_register_offset(void *base, enum bam_regs reg,
+								u32 param)
+{
+	int index = BAM_ERROR;
+	u32 offset = 0;
+	u32 *ptr_reg = bam_regmap[bam_type];
+	struct sps_bam *dev = to_sps_bam_dev(base);
+
+	if ((dev == NULL) || (&dev->base != base)) {
+		SPS_ERR(sps, "%s:Failed to get dev for base addr 0x%p\n",
+				__func__, base);
+		return SPS_ERROR;
+	}
+
+	if (reg >= CTRL && reg < IRQ_SRCS_EE)
+		index = 0;
+	if (reg >= IRQ_SRCS_EE && reg < P_CTRL)
+		index = (bam_type == SPS_BAM_NDP_4K) ? 0x1000 : 0x80;
+	if (reg >= P_CTRL && reg < P_TRUST_REG) {
+		if (bam_type == SPS_BAM_LEGACY) {
+			if (reg >= P_EVNT_DEST_ADDR)
+				index = 0x40;
+			else
+				index = 0x80;
+		} else
+			index = 0x1000;
+	} else if (reg == P_TRUST_REG) {
+		if (bam_type == SPS_BAM_LEGACY)
+			index = 0x80;
+		else
+			index = (bam_type == SPS_BAM_NDP_4K) ? 0x4 : 0x1000;
+	}
+	if (index < 0) {
+		SPS_ERR(dev, "%s:Failed to find register offset index\n",
+			__func__);
+		return index;
+	}
+
+	offset = *(ptr_reg + reg) + (index * param);
+	return offset;
+}
+
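+/*
+ * Worked example: on an NDP BAM the per-pipe register stride is
+ * 0x1000, so P_CTRL for pipe 3 resolves to 0x1000 + (0x1000 * 3) =
+ * 0x4000.  On a legacy BAM the stride is 0x80 (0x40 from
+ * P_EVNT_DEST_ADDR onward), so P_CTRL for pipe 3 is 0x0 +
+ * (0x80 * 3) = 0x180.
+ */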
+
+/**
+ *
+ * Read register with debug info.
+ *
+ * @base - bam base virtual address.
+ * @reg - register identifier.
+ * @param - pipe or EE index used to compute the register offset.
+ *
+ * @return u32
+ */
+static inline u32 bam_read_reg(void *base, enum bam_regs reg, u32 param)
+{
+	u32 val;
+	int offset;
+	struct sps_bam *dev = to_sps_bam_dev(base);
+
+	if ((dev == NULL) || (&dev->base != base)) {
+		SPS_ERR(sps, "%s:Failed to get dev for base addr 0x%p\n",
+				__func__, base);
+		return SPS_ERROR;
+	}
+	offset = bam_get_register_offset(base, reg, param);
+	if (offset < 0) {
+		SPS_ERR(dev, "%s:Failed to get the register offset\n",
+			__func__);
+		return offset;
+	}
+	val = ioread32(dev->base + offset);
+	SPS_DBG(dev, "sps:bam 0x%p(va) offset 0x%x reg 0x%x r_val 0x%x.\n",
+			dev->base, offset, reg, val);
+	return val;
+}
+
+/**
+ * Read register masked field with debug info.
+ *
+ * @base - bam base virtual address.
+ * @reg - register identifier.
+ * @param - pipe or EE index used to compute the register offset.
+ * @mask - register bitmask.
+ *
+ * @return u32
+ */
+static inline u32 bam_read_reg_field(void *base, enum bam_regs reg, u32 param,
+								const u32 mask)
+{
+	u32 val, shift;
+	int offset;
+	struct sps_bam *dev = to_sps_bam_dev(base);
+
+	if ((dev == NULL) || (&dev->base != base)) {
+		SPS_ERR(sps, "%s:Failed to get dev for base addr 0x%p\n",
+				__func__, base);
+		return SPS_ERROR;
+	}
+	shift = find_first_bit((void *)&mask, 32);
+	offset = bam_get_register_offset(base, reg, param);
+	if (offset < 0) {
+		SPS_ERR(dev, "%s:Failed to get the register offset\n",
+			__func__);
+		return offset;
+	}
+	val = ioread32(dev->base + offset);
+	val &= mask;		/* clear other bits */
+	val >>= shift;
+	SPS_DBG(dev, "sps:bam 0x%p(va) read reg 0x%x mask 0x%x r_val 0x%x.\n",
+			dev->base, offset, mask, val);
+	return val;
+}
+
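+/*
+ * Worked example: reading BAM_NUM_EES (mask 0xf00) from REVISION.
+ * find_first_bit() on the mask gives shift 8, so a raw REVISION value
+ * of 0x00200345 yields (0x00200345 & 0xf00) >> 8 = 0x3, i.e. 3 EEs.
+ */
+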
+/**
+ *
+ * Write register with debug info.
+ *
+ * @base - bam base virtual address.
+ * @reg - register identifier.
+ * @param - pipe or EE index used to compute the register offset.
+ * @val - value to write.
+ *
+ */
+static inline void bam_write_reg(void *base, enum bam_regs reg,
+						u32 param, u32 val)
+{
+	int offset;
+	struct sps_bam *dev = to_sps_bam_dev(base);
+
+	if ((dev == NULL) || (&dev->base != base)) {
+		SPS_ERR(sps, "%s:Failed to get dev for base addr 0x%p\n",
+				__func__, base);
+		return;
+	}
+	offset = bam_get_register_offset(base, reg, param);
+	if (offset < 0) {
+		SPS_ERR(dev, "%s:Failed to get the register offset\n",
+			__func__);
+		return;
+	}
+	iowrite32(val, dev->base + offset);
+	SPS_DBG(dev, "sps:bam 0x%p(va) write reg 0x%x w_val 0x%x.\n",
+			dev->base, offset, val);
+}
+
+/**
+ * Write register masked field with debug info.
+ *
+ * @base - bam base virtual address.
+ * @reg - register identifier.
+ * @param - pipe or EE index used to compute the register offset.
+ * @mask - register bitmask.
+ * @val - value to write.
+ *
+ */
+static inline void bam_write_reg_field(void *base, enum bam_regs reg,
+					u32 param, const u32 mask, u32 val)
+{
+	u32 tmp, shift;
+	int offset;
+	struct sps_bam *dev = to_sps_bam_dev(base);
+
+	if ((dev == NULL) || (&dev->base != base)) {
+		SPS_ERR(sps, "%s:Failed to get dev for base addr 0x%p\n",
+				__func__, base);
+		return;
+	}
+	shift = find_first_bit((void *)&mask, 32);
+	offset = bam_get_register_offset(base, reg, param);
+	if (offset < 0) {
+		SPS_ERR(dev, "%s:Failed to get the register offset\n",
+			__func__);
+		return;
+	}
+	tmp = ioread32(dev->base + offset);
+
+	tmp &= ~mask;		/* clear written bits */
+	val = tmp | (val << shift);
+	iowrite32(val, dev->base + offset);
+	SPS_DBG(dev, "sps:bam 0x%p(va) write reg 0x%x w_val 0x%x.\n",
+			dev->base, offset, val);
+}
+
+/**
+ * Initialize a BAM device
+ *
+ */
+int bam_init(void *base, u32 ee,
+		u16 summing_threshold,
+		u32 irq_mask, u32 *version,
+		u32 *num_pipes, u32 options)
+{
+	u32 cfg_bits;
+	u32 ver = 0;
+	struct sps_bam *dev = to_sps_bam_dev(base);
+
+	if ((dev == NULL) || (&dev->base != base)) {
+		SPS_ERR(sps, "%s:Failed to get dev for base addr 0x%p\n",
+				__func__, base);
+		return SPS_ERROR;
+	}
+
+	SPS_DBG3(dev, "sps:%s:bam=%pa 0x%p(va).ee=%d.", __func__,
+			BAM_ID(dev), dev->base, ee);
+
+	ver = bam_read_reg_field(base, REVISION, 0, BAM_REVISION);
+
+	if ((ver < BAM_MIN_VERSION) || (ver > BAM_MAX_VERSION)) {
+		SPS_ERR(dev, "sps:bam 0x%p(va) Invalid BAM REVISION 0x%x.\n",
+				dev->base, ver);
+		return -ENODEV;
+	}
+
+	SPS_DBG(dev, "sps:REVISION of BAM 0x%p is 0x%x.\n",
+				dev->base, ver);
+
+	if (summing_threshold == 0) {
+		summing_threshold = 4;
+		SPS_ERR(dev,
+			"sps:bam 0x%p(va) summing_threshold is zero; using default of 4.\n",
+			dev->base);
+	}
+
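+	/*
+	 * CNFG_BITS starts from all-ones with selected bits cleared:
+	 * bit 11 (CNFG_BITS_BAM_FULL_PIPE) is always cleared, and with
+	 * SPS_BAM_NO_EXT_P_RST bit 12 (CNFG_BITS_BAM_NO_EXT_P_RST) is
+	 * cleared as well.
+	 */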
+	if (options & SPS_BAM_NO_EXT_P_RST)
+		cfg_bits = 0xffffffff & ~(3 << 11);
+	else
+		cfg_bits = 0xffffffff & ~(1 << 11);
+
+	bam_write_reg_field(base, CTRL, 0, BAM_SW_RST, 1);
+	/* No delay needed */
+	bam_write_reg_field(base, CTRL, 0, BAM_SW_RST, 0);
+
+	bam_write_reg_field(base, CTRL, 0, BAM_EN, 1);
+
+#ifdef CONFIG_SPS_SUPPORT_NDP_BAM
+	bam_write_reg_field(base, CTRL, 0, CACHE_MISS_ERR_RESP_EN, 0);
+
+	if (options & SPS_BAM_NO_LOCAL_CLK_GATING)
+		bam_write_reg_field(base, CTRL, 0, LOCAL_CLK_GATING, 0);
+	else
+		bam_write_reg_field(base, CTRL, 0, LOCAL_CLK_GATING, 1);
+
+	if (enhd_pipe) {
+		if (options & SPS_BAM_CANCEL_WB)
+			bam_write_reg_field(base, CTRL, 0,
+					BAM_MESS_ONLY_CANCEL_WB, 1);
+		else
+			bam_write_reg_field(base, CTRL, 0,
+					BAM_MESS_ONLY_CANCEL_WB, 0);
+	}
+#endif
+	bam_write_reg(base, DESC_CNT_TRSHLD, 0, summing_threshold);
+
+	bam_write_reg(base, CNFG_BITS, 0, cfg_bits);
+
+	/*
+	 * Enable the global BAM interrupt (error conditions), filtered
+	 * by the mask.
+	 * Note: pipe interrupts stay disabled until BAM_P_IRQ_ENn is set.
+	 */
+	bam_write_reg_field(base, IRQ_SRCS_MSK_EE, ee, BAM_IRQ, 1);
+
+	bam_write_reg(base, IRQ_EN, 0, irq_mask);
+
+	*num_pipes = bam_read_reg_field(base, NUM_PIPES, 0, BAM_NUM_PIPES);
+
+	*version = ver;
+
+	return 0;
+}
+
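+/*
+ * Call sketch (illustrative, with made-up values): an irq_mask of 0x6
+ * enables the ERROR and HRESP_ERR interrupts per the IRQ_EN masks
+ * above.
+ *
+ *	u32 version, num_pipes;
+ *	int rc;
+ *
+ *	rc = bam_init(&dev->base, 0, 4, 0x6, &version, &num_pipes, 0);
+ */
+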
+/**
+ * Set BAM global execution environment
+ *
+ * @base - BAM virtual base address
+ *
+ * @ee - BAM execution environment index
+ *
+ * @vmid - virtual master identifier
+ *
+ * @reset - enable/disable BAM global software reset
+ */
+static void bam_set_ee(void *base, u32 ee, u32 vmid,
+			enum bam_nonsecure_reset reset)
+{
+	bam_write_reg_field(base, TRUST_REG, 0, BAM_EE, ee);
+	bam_write_reg_field(base, TRUST_REG, 0, BAM_VMID, vmid);
+	bam_write_reg_field(base, TRUST_REG, 0, BAM_RST_BLOCK, reset);
+}
+
+/**
+ * Set the pipe execution environment
+ *
+ * @base - BAM virtual base address
+ *
+ * @pipe - pipe index
+ *
+ * @ee - BAM execution environment index
+ *
+ * @vmid - virtual master identifier
+ */
+static void bam_pipe_set_ee(void *base, u32 pipe, u32 ee, u32 vmid)
+{
+	bam_write_reg_field(base, P_TRUST_REG, pipe, BAM_P_EE, ee);
+	bam_write_reg_field(base, P_TRUST_REG, pipe, BAM_P_VMID, vmid);
+}
+
+/**
+ * Initialize BAM device security execution environment
+ */
+int bam_security_init(void *base, u32 ee, u32 vmid, u32 pipe_mask)
+{
+	u32 version;
+	u32 num_pipes;
+	u32 mask;
+	u32 pipe;
+	struct sps_bam *dev = to_sps_bam_dev(base);
+
+	if ((dev == NULL) || (&dev->base != base)) {
+		SPS_ERR(sps, "%s:Failed to get dev for base addr 0x%p\n",
+				__func__, base);
+		return SPS_ERROR;
+	}
+
+	SPS_DBG3(dev, "sps:%s:bam=%pa 0x%p(va).", __func__,
+			BAM_ID(dev), dev->base);
+
+	/*
+	 * Discover the hardware version number and the number of pipes
+	 * supported by this BAM
+	 */
+	version = bam_read_reg_field(base, REVISION, 0, BAM_REVISION);
+	num_pipes = bam_read_reg_field(base, NUM_PIPES, 0, BAM_NUM_PIPES);
+	if (version < 3 || version > 0x1F) {
+		SPS_ERR(dev,
+			"sps:bam 0x%p(va) security is not supported for this BAM version 0x%x.\n",
+			dev->base, version);
+		return -ENODEV;
+	}
+
+	if (num_pipes > BAM_MAX_PIPES) {
+		SPS_ERR(dev,
+		"sps:bam 0x%p(va) the number of pipes is more than the maximum number allowed.\n",
+			dev->base);
+		return -ENODEV;
+	}
+
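+	/*
+	 * Each pipe_mask bit selects one pipe: e.g. pipe_mask = 0x5
+	 * assigns pipes 0 and 2 to this EE/VMID.
+	 */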
+	for (pipe = 0, mask = 1; pipe < num_pipes; pipe++, mask <<= 1)
+		if ((mask & pipe_mask) != 0)
+			bam_pipe_set_ee(base, pipe, ee, vmid);
+
+	/* If MSbit is set, assign top-level interrupt to this EE */
+	mask = 1UL << 31;
+	if ((mask & pipe_mask) != 0)
+		bam_set_ee(base, ee, vmid, BAM_NONSECURE_RESET_ENABLE);
+
+	return 0;
+}
+
+#ifdef CONFIG_SPS_SUPPORT_NDP_BAM
+static inline u32 bam_get_pipe_attr(void *base, u32 ee, bool global)
+{
+	u32 val;
+
+	if (global)
+		val = bam_read_reg_field(base, PIPE_ATTR_EE, ee, BAM_ENABLED);
+	else
+		val = bam_read_reg_field(base, PIPE_ATTR_EE, ee, P_ATTR);
+
+	return val;
+}
+#else
+static inline u32 bam_get_pipe_attr(void *base, u32 ee, bool global)
+{
+	return 0;
+}
+#endif
+
+/**
+ * Verify that a BAM device is enabled and gather its hardware
+ * configuration.
+ *
+ */
+int bam_check(void *base, u32 *version, u32 ee, u32 *num_pipes)
+{
+	u32 ver = 0;
+	u32 enabled = 0;
+	struct sps_bam *dev = to_sps_bam_dev(base);
+
+	if ((dev == NULL) || (&dev->base != base)) {
+		SPS_ERR(sps, "%s:Failed to get dev for base addr 0x%p\n",
+				__func__, base);
+		return SPS_ERROR;
+	}
+
+	SPS_DBG3(dev, "sps:%s:bam=%pa 0x%p(va).",
+			__func__, BAM_ID(dev), dev->base);
+
+	if (!enhd_pipe)
+		enabled = bam_read_reg_field(base, CTRL, 0, BAM_EN);
+	else
+		enabled = bam_get_pipe_attr(base, ee, true);
+
+	if (!enabled) {
+		SPS_ERR(dev, "sps:%s:bam 0x%p(va) is not enabled.\n",
+				__func__, dev->base);
+		return -ENODEV;
+	}
+
+	ver = bam_read_reg(base, REVISION, 0) & BAM_REVISION;
+
+	/*
+	 *  Discover the hardware version number and the number of pipes
+	 *  supported by this BAM
+	 */
+	*num_pipes = bam_read_reg_field(base, NUM_PIPES, 0, BAM_NUM_PIPES);
+	*version = ver;
+
+	/* Check BAM version */
+	if ((ver < BAM_MIN_VERSION) || (ver > BAM_MAX_VERSION)) {
+		SPS_ERR(dev, "sps:%s:bam 0x%p(va) Invalid BAM version 0x%x.\n",
+				__func__, dev->base, ver);
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+/**
+ * Disable a BAM device
+ *
+ */
+void bam_exit(void *base, u32 ee)
+{
+	struct sps_bam *dev = to_sps_bam_dev(base);
+
+	if ((dev == NULL) || (&dev->base != base)) {
+		SPS_ERR(sps, "%s:Failed to get dev for base addr 0x%p\n",
+				__func__, base);
+		return;
+	}
+	SPS_DBG3(dev, "sps:%s:bam=%pa 0x%p(va).ee=%d.",
+			__func__, BAM_ID(dev), dev->base, ee);
+
+	bam_write_reg_field(base, IRQ_SRCS_MSK_EE, ee, BAM_IRQ, 0);
+
+	bam_write_reg(base, IRQ_EN, 0, 0);
+
+	/* Disable the BAM */
+	bam_write_reg_field(base, CTRL, 0, BAM_EN, 0);
+}
+
+/**
+ * Output BAM register content
+ * including the TEST_BUS register content under
+ * different TEST_BUS_SEL values.
+ */
+void bam_output_register_content(void *base, u32 ee)
+{
+	u32 num_pipes;
+	u32 i;
+	u32 pipe_attr = 0;
+	struct sps_bam *dev = to_sps_bam_dev(base);
+
+	if ((dev == NULL) || (&dev->base != base)) {
+		SPS_ERR(sps, "%s:Failed to get dev for base addr 0x%p\n",
+				__func__, base);
+		return;
+	}
+
+	print_bam_test_bus_reg(base, 0);
+
+	print_bam_selected_reg(base, BAM_MAX_EES);
+
+	num_pipes = bam_read_reg_field(base, NUM_PIPES, 0,
+					BAM_NUM_PIPES);
+	SPS_INFO(dev, "sps:bam %pa 0x%p(va) has %d pipes.",
+			BAM_ID(dev), dev->base, num_pipes);
+
+	pipe_attr = enhd_pipe ?
+		bam_get_pipe_attr(base, ee, false) : 0x0;
+
+	if (!enhd_pipe || !pipe_attr) {
+		for (i = 0; i < num_pipes; i++)
+			print_bam_pipe_selected_reg(base, i);
+	} else {
+		for (i = 0; i < num_pipes; i++) {
+			if (pipe_attr & (1UL << i))
+				print_bam_pipe_selected_reg(base, i);
+		}
+	}
+}
+
+/**
+ * Get BAM IRQ source and clear global IRQ status
+ */
+u32 bam_check_irq_source(void *base, u32 ee, u32 mask,
+				enum sps_callback_case *cb_case)
+{
+	u32 source = 0, clr = 0;
+	struct sps_bam *dev = to_sps_bam_dev(base);
+
+	if ((dev == NULL) || (&dev->base != base)) {
+		SPS_ERR(sps, "%s:Failed to get dev for base addr 0x%p\n",
+				__func__, base);
+		return SPS_ERROR;
+	}
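+	/*
+	 * Bit 31 of IRQ_SRCS_EE (BAM_IRQ) flags a BAM-level interrupt;
+	 * bits 30:0 (P_IRQ) are per-pipe sources.
+	 */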
+	source = bam_read_reg(base, IRQ_SRCS_EE, ee);
+	clr = source & (1UL << 31);
+
+	if (clr) {
+		u32 status = 0;
+
+		status = bam_read_reg(base, IRQ_STTS, 0);
+
+		if (status & IRQ_STTS_BAM_ERROR_IRQ) {
+			SPS_ERR(dev,
+				"sps:bam %pa 0x%p(va);bam irq status=0x%x.\nsps: BAM_ERROR_IRQ\n",
+				BAM_ID(dev), dev->base, status);
+			bam_output_register_content(base, ee);
+			*cb_case = SPS_CALLBACK_BAM_ERROR_IRQ;
+		} else if (status & IRQ_STTS_BAM_HRESP_ERR_IRQ) {
+			SPS_ERR(dev,
+				"sps:bam %pa 0x%p(va);bam irq status=0x%x.\nsps: BAM_HRESP_ERR_IRQ\n",
+				BAM_ID(dev), dev->base, status);
+			bam_output_register_content(base, ee);
+			*cb_case = SPS_CALLBACK_BAM_HRESP_ERR_IRQ;
+#ifdef CONFIG_SPS_SUPPORT_NDP_BAM
+		} else if (status & IRQ_STTS_BAM_TIMER_IRQ) {
+			SPS_DBG1(dev,
+				"sps:bam 0x%p(va);receive BAM_TIMER_IRQ\n",
+					dev->base);
+			*cb_case = SPS_CALLBACK_BAM_TIMER_IRQ;
+#endif
+		} else
+			SPS_INFO(dev,
+				"sps:bam %pa 0x%p(va);bam irq status=0x%x.\n",
+				BAM_ID(dev), dev->base, status);
+
+		bam_write_reg(base, IRQ_CLR, 0, status);
+	}
+
+	source &= (mask|(1UL << 31));
+	return source;
+}
+
+/*
+ * Reset a BAM pipe
+ */
+void bam_pipe_reset(void *base, u32 pipe)
+{
+	struct sps_bam *dev = to_sps_bam_dev(base);
+
+	if ((dev == NULL) || (&dev->base != base)) {
+		SPS_ERR(sps, "%s:Failed to get dev for base addr 0x%p\n",
+				__func__, base);
+		return;
+	}
+	SPS_DBG2(dev, "sps:%s:bam=%pa 0x%p(va).pipe=%d.",
+			__func__, BAM_ID(dev), dev->base, pipe);
+
+	bam_write_reg(base, P_RST, pipe, 1);
+	wmb(); /* ensure pipe is reset */
+	bam_write_reg(base, P_RST, pipe, 0);
+	wmb(); /* ensure pipe reset is de-asserted*/
+}
+
+/*
+ * Disable a BAM pipe
+ */
+void bam_disable_pipe(void *base, u32 pipe)
+{
+	struct sps_bam *dev = to_sps_bam_dev(base);
+
+	if ((dev == NULL) || (&dev->base != base)) {
+		SPS_ERR(sps, "%s:Failed to get dev for base addr 0x%p\n",
+				__func__, base);
+		return;
+	}
+	SPS_DBG2(dev, "sps:%s:bam=0x%p(va).pipe=%d.", __func__, base, pipe);
+	bam_write_reg_field(base, P_CTRL, pipe, P_EN, 0);
+	wmb(); /* ensure pipe is disabled */
+}
+
+/*
+ * Check if the last desc is ZLT
+ */
+bool bam_pipe_check_zlt(void *base, u32 pipe)
+{
+	struct sps_bam *dev = to_sps_bam_dev(base);
+
+	if ((dev == NULL) || (&dev->base != base)) {
+		SPS_ERR(sps, "%s:Failed to get dev for base addr 0x%p\n",
+				__func__, base);
+		return false;
+	}
+
+	if (bam_read_reg_field(base, P_HALT, pipe, P_HALT_P_LAST_DESC_ZLT)) {
+		SPS_DBG(dev,
+			"sps:%s:bam=0x%p(va).pipe=%d: the last desc is ZLT.",
+			__func__, base, pipe);
+		return true;
+	}
+
+	SPS_DBG(dev,
+		"sps:%s:bam=0x%p(va).pipe=%d: the last desc is not ZLT.",
+		__func__, base, pipe);
+	return false;
+}
+
+/*
+ * Check if desc FIFO is empty
+ */
+bool bam_pipe_check_pipe_empty(void *base, u32 pipe)
+{
+	struct sps_bam *dev = to_sps_bam_dev(base);
+
+	if ((dev == NULL) || (&dev->base != base)) {
+		SPS_ERR(sps, "%s:Failed to get dev for base addr 0x%p\n",
+				__func__, base);
+		return false;
+	}
+
+	if (bam_read_reg_field(base, P_HALT, pipe, P_HALT_P_PIPE_EMPTY)) {
+		SPS_DBG(dev,
+			"sps:%s:bam=0x%p(va).pipe=%d: desc FIFO is empty.",
+			__func__, base, pipe);
+		return true;
+	}
+
+	SPS_DBG(dev,
+		"sps:%s:bam=0x%p(va).pipe=%d: desc FIFO is not empty.",
+		__func__, base, pipe);
+	return false;
+}
+
+/**
+ * Initialize a BAM pipe
+ */
+int bam_pipe_init(void *base, u32 pipe,	struct bam_pipe_parameters *param,
+					u32 ee)
+{
+	struct sps_bam *dev = to_sps_bam_dev(base);
+
+	if ((dev == NULL) || (&dev->base != base)) {
+		SPS_ERR(sps, "%s:Failed to get dev for base addr 0x%p\n",
+				__func__, base);
+		return SPS_ERROR;
+	}
+	SPS_DBG2(dev, "sps:%s:bam=%pa 0x%p(va).pipe=%d.",
+			__func__, BAM_ID(dev), dev->base, pipe);
+
+	/* Reset the BAM pipe */
+	bam_write_reg(base, P_RST, pipe, 1);
+	/* No delay needed */
+	bam_write_reg(base, P_RST, pipe, 0);
+
+	/* Enable the Pipe Interrupt at the BAM level */
+	bam_write_reg_field(base, IRQ_SRCS_MSK_EE, ee, (1 << pipe), 1);
+
+	bam_write_reg(base, P_IRQ_EN, pipe, param->pipe_irq_mask);
+
+	bam_write_reg_field(base, P_CTRL, pipe, P_DIRECTION, param->dir);
+	bam_write_reg_field(base, P_CTRL, pipe, P_SYS_MODE, param->mode);
+
+	bam_write_reg(base, P_EVNT_GEN_TRSHLD, pipe, param->event_threshold);
+
+	bam_write_reg(base, P_DESC_FIFO_ADDR, pipe,
+			SPS_GET_LOWER_ADDR(param->desc_base));
+	bam_write_reg_field(base, P_FIFO_SIZES, pipe, P_DESC_FIFO_SIZE,
+			    param->desc_size);
+
+	bam_write_reg_field(base, P_CTRL, pipe, P_SYS_STRM,
+			    param->stream_mode);
+
+#ifdef CONFIG_SPS_SUPPORT_NDP_BAM
+	if (SPS_LPAE && SPS_GET_UPPER_ADDR(param->desc_base))
+		bam_write_reg(base, P_DESC_FIFO_ADDR_MSB, pipe,
+				SPS_GET_UPPER_ADDR(param->desc_base));
+
+	bam_write_reg_field(base, P_CTRL, pipe, P_LOCK_GROUP,
+				param->lock_group);
+
+	SPS_DBG(dev, "sps:bam=0x%p(va).pipe=%d.lock_group=%d.\n",
+			dev->base, pipe, param->lock_group);
+#endif
+
+	if (param->mode == BAM_PIPE_MODE_BAM2BAM) {
+		u32 peer_dest_addr = param->peer_phys_addr +
+				      bam_get_register_offset(base, P_EVNT_REG,
+						      param->peer_pipe);
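+		/*
+		 * Worked example (made-up address): an NDP peer BAM at
+		 * physical 0xf9a44000 with peer_pipe 2 gives
+		 * 0xf9a44000 + 0x1818 + (0x1000 * 2) = 0xf9a47818.
+		 */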
+
+		bam_write_reg(base, P_DATA_FIFO_ADDR, pipe,
+			      SPS_GET_LOWER_ADDR(param->data_base));
+		bam_write_reg_field(base, P_FIFO_SIZES, pipe,
+				    P_DATA_FIFO_SIZE, param->data_size);
+
+		bam_write_reg(base, P_EVNT_DEST_ADDR, pipe, peer_dest_addr);
+
+		SPS_DBG2(dev,
+			"sps:bam=0x%p(va).pipe=%d.peer_bam=0x%x.peer_pipe=%d.\n",
+			dev->base, pipe,
+			(u32) param->peer_phys_addr,
+			param->peer_pipe);
+
+#ifdef CONFIG_SPS_SUPPORT_NDP_BAM
+		if (SPS_LPAE && SPS_GET_UPPER_ADDR(param->data_base)) {
+			bam_write_reg(base, P_EVNT_DEST_ADDR_MSB, pipe, 0x0);
+			bam_write_reg(base, P_DATA_FIFO_ADDR_MSB, pipe,
+				      SPS_GET_UPPER_ADDR(param->data_base));
+		}
+
+		bam_write_reg_field(base, P_CTRL, pipe, P_WRITE_NWD,
+					param->write_nwd);
+
+		SPS_DBG(dev, "sps:%s WRITE_NWD bit for this bam2bam pipe.",
+			param->write_nwd ? "Set" : "Do not set");
+#endif
+	}
+
+	/* Pipe Enable - at last */
+	bam_write_reg_field(base, P_CTRL, pipe, P_EN, 1);
+
+	return 0;
+}
+
+/**
+ * Reset the BAM pipe
+ *
+ */
+void bam_pipe_exit(void *base, u32 pipe, u32 ee)
+{
+	struct sps_bam *dev = to_sps_bam_dev(base);
+
+	if ((dev == NULL) || (&dev->base != base)) {
+		SPS_ERR(sps, "%s:Failed to get dev for base addr 0x%p\n",
+				__func__, base);
+		return;
+	}
+	SPS_DBG2(dev, "sps:%s:bam=%pa 0x%p(va).pipe=%d.",
+			__func__, BAM_ID(dev), dev->base, pipe);
+
+	bam_write_reg(base, P_IRQ_EN, pipe, 0);
+
+	/* Disable the Pipe Interrupt at the BAM level */
+	bam_write_reg_field(base, IRQ_SRCS_MSK_EE, ee, (1 << pipe), 0);
+
+	/* Pipe Disable */
+	bam_write_reg_field(base, P_CTRL, pipe, P_EN, 0);
+}
+
+/**
+ * Enable a BAM pipe
+ *
+ */
+void bam_pipe_enable(void *base, u32 pipe)
+{
+	struct sps_bam *dev = to_sps_bam_dev(base);
+
+	if ((dev == NULL) || (&dev->base != base)) {
+		SPS_ERR(sps, "%s:Failed to get dev for base addr 0x%p\n",
+				__func__, base);
+		return;
+	}
+	SPS_DBG2(dev, "sps:%s:bam=%pa 0x%p(va).pipe=%d.",
+			__func__, BAM_ID(dev), dev->base, pipe);
+
+	if (bam_read_reg_field(base, P_CTRL, pipe, P_EN))
+		SPS_DBG2(dev, "sps:bam=0x%p(va).pipe=%d is already enabled.\n",
+				dev->base, pipe);
+	else
+		bam_write_reg_field(base, P_CTRL, pipe, P_EN, 1);
+}
+
+/**
+ * Disable a BAM pipe
+ *
+ */
+void bam_pipe_disable(void *base, u32 pipe)
+{
+	struct sps_bam *dev = to_sps_bam_dev(base);
+
+	if ((dev == NULL) || (&dev->base != base)) {
+		SPS_ERR(sps, "%s:Failed to get dev for base addr 0x%p\n",
+				__func__, base);
+		return;
+	}
+	SPS_DBG2(dev, "sps:%s:bam=%pa 0x%p(va).pipe=%d.",
+			__func__, BAM_ID(dev), dev->base, pipe);
+
+	bam_write_reg_field(base, P_CTRL, pipe, P_EN, 0);
+}
+
+/**
+ * Check if a BAM pipe is enabled.
+ *
+ */
+int bam_pipe_is_enabled(void *base, u32 pipe)
+{
+	return bam_read_reg_field(base, P_CTRL, pipe, P_EN);
+}
+
+/**
+ * Configure interrupt for a BAM pipe
+ *
+ */
+void bam_pipe_set_irq(void *base, u32 pipe, enum bam_enable irq_en,
+		      u32 src_mask, u32 ee)
+{
+	struct sps_bam *dev = to_sps_bam_dev(base);
+
+	if ((dev == NULL) || (&dev->base != base)) {
+		SPS_ERR(sps, "%s:Failed to get dev for base addr 0x%p\n",
+				__func__, base);
+		return;
+	}
+	SPS_DBG2(dev,
+		"sps:%s:bam=%pa 0x%p(va).pipe=%d; irq_en:%d; src_mask:0x%x; ee:%d.\n",
+			__func__, BAM_ID(dev), dev->base, pipe,
+			irq_en, src_mask, ee);
+	if (src_mask & BAM_PIPE_IRQ_RST_ERROR) {
+		if (enhd_pipe)
+			bam_write_reg_field(base, IRQ_EN, 0,
+					IRQ_EN_BAM_ERROR_EN, 0);
+		else {
+			src_mask &= ~BAM_PIPE_IRQ_RST_ERROR;
+			SPS_DBG2(dev,
+				"sps:%s:SPS_O_RST_ERROR is not supported\n",
+				__func__);
+		}
+	}
+	if (src_mask & BAM_PIPE_IRQ_HRESP_ERROR) {
+		if (enhd_pipe)
+			bam_write_reg_field(base, IRQ_EN, 0,
+					IRQ_EN_BAM_HRESP_ERR_EN, 0);
+		else {
+			src_mask &= ~BAM_PIPE_IRQ_HRESP_ERROR;
+			SPS_DBG2(dev,
+				"sps:%s:SPS_O_HRESP_ERROR is not supported\n",
+				__func__);
+		}
+	}
+
+	bam_write_reg(base, P_IRQ_EN, pipe, src_mask);
+	bam_write_reg_field(base, IRQ_SRCS_MSK_EE, ee, (1 << pipe), irq_en);
+}
+
+/**
+ * Configure a BAM pipe for satellite MTI use
+ *
+ */
+void bam_pipe_satellite_mti(void *base, u32 pipe, u32 irq_gen_addr, u32 ee)
+{
+	bam_write_reg(base, P_IRQ_EN, pipe, 0);
+#ifndef CONFIG_SPS_SUPPORT_NDP_BAM
+	bam_write_reg(base, P_IRQ_DEST_ADDR, pipe, irq_gen_addr);
+	bam_write_reg_field(base, IRQ_SIC_SEL, 0, (1 << pipe), 1);
+#endif
+	bam_write_reg_field(base, IRQ_SRCS_MSK, 0, (1 << pipe), 1);
+}
+
+/**
+ * Configure MTI for a BAM pipe
+ *
+ */
+void bam_pipe_set_mti(void *base, u32 pipe, enum bam_enable irq_en,
+		      u32 src_mask, u32 irq_gen_addr)
+{
+	/*
+	 * MTI use is only supported on BAMs when global config is controlled
+	 * by a remote processor.
+	 * Consequently, the global configuration register to enable SIC (MTI)
+	 * support cannot be accessed.
+	 * The remote processor must be relied upon to enable the SIC and the
+	 * interrupt. Since the remote processor enables both the SIC and the interrupt,
+	 * the interrupt enable mask must be set to zero for polling mode.
+	 */
+#ifndef CONFIG_SPS_SUPPORT_NDP_BAM
+	bam_write_reg(base, P_IRQ_DEST_ADDR, pipe, irq_gen_addr);
+#endif
+	if (!irq_en)
+		src_mask = 0;
+
+	bam_write_reg(base, P_IRQ_EN, pipe, src_mask);
+}
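+
+/*
+ * Example (illustrative sketch): per the comment above, polling mode is
+ * requested with BAM_DISABLE, which forces the source mask to zero. The
+ * irq_gen_addr value is a hypothetical platform-provided MTI address.
+ */
+static void __maybe_unused bam_pipe_mti_polling_example(void *base, u32 pipe,
+							u32 irq_gen_addr)
+{
+	bam_pipe_set_mti(base, pipe, BAM_DISABLE, 0, irq_gen_addr);
+}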
+
+/**
+ * Get and Clear BAM pipe IRQ status
+ *
+ */
+u32 bam_pipe_get_and_clear_irq_status(void *base, u32 pipe)
+{
+	u32 status = 0;
+
+	status = bam_read_reg(base, P_IRQ_STTS, pipe);
+	bam_write_reg(base, P_IRQ_CLR, pipe, status);
+
+	return status;
+}
+
+/**
+ * Set write offset for a BAM pipe
+ *
+ */
+void bam_pipe_set_desc_write_offset(void *base, u32 pipe, u32 next_write)
+{
+	/*
+	 * It is not necessary to perform a read-modify-write masking to write
+	 * the P_DESC_FIFO_PEER_OFST value, since the other field in the
+	 * register (P_BYTES_CONSUMED) is read-only.
+	 */
+	bam_write_reg_field(base, P_EVNT_REG, pipe, P_DESC_FIFO_PEER_OFST,
+			    next_write);
+}
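+
+/*
+ * Example (illustrative sketch): in system mode, software appends
+ * descriptors and then publishes the new write offset; the transfer is
+ * complete once the hardware read offset catches up with it.
+ */
+static bool __maybe_unused bam_pipe_submit_example(void *base, u32 pipe,
+						   u32 next_write)
+{
+	bam_pipe_set_desc_write_offset(base, pipe, next_write);
+
+	return bam_pipe_get_desc_read_offset(base, pipe) == next_write;
+}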
+
+/**
+ * Get write offset for a BAM pipe
+ *
+ */
+u32 bam_pipe_get_desc_write_offset(void *base, u32 pipe)
+{
+	return bam_read_reg_field(base, P_EVNT_REG, pipe,
+				  P_DESC_FIFO_PEER_OFST);
+}
+
+/**
+ * Get read offset for a BAM pipe
+ *
+ */
+u32 bam_pipe_get_desc_read_offset(void *base, u32 pipe)
+{
+	return bam_read_reg_field(base, P_SW_OFSTS, pipe, SW_DESC_OFST);
+}
+
+/**
+ * Configure inactivity timer count for a BAM pipe
+ *
+ */
+void bam_pipe_timer_config(void *base, u32 pipe, enum bam_pipe_timer_mode mode,
+			 u32 timeout_count)
+{
+	u32 for_all_pipes = 0;
+
+#ifdef CONFIG_SPS_SUPPORT_NDP_BAM
+	for_all_pipes = bam_read_reg_field(base, REVISION, 0,
+						BAM_NUM_INACTIV_TMRS);
+#endif
+
+	if (for_all_pipes) {
+#ifdef CONFIG_SPS_SUPPORT_NDP_BAM
+		bam_write_reg_field(base, TIMER_CTRL, 0, TIMER_MODE, mode);
+		bam_write_reg_field(base, TIMER_CTRL, 0, TIMER_TRSHLD,
+				    timeout_count);
+#endif
+	} else {
+		bam_write_reg_field(base, P_TIMER_CTRL, pipe, P_TIMER_MODE,
+					mode);
+		bam_write_reg_field(base, P_TIMER_CTRL, pipe, P_TIMER_TRSHLD,
+				    timeout_count);
+	}
+}
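+
+/*
+ * Example (illustrative sketch): arm a periodic inactivity timer with a
+ * hypothetical threshold of 0x200, then kick it. On NDP BAMs with a single
+ * global timer, the same call programs TIMER_CTRL for all pipes instead.
+ */
+static void __maybe_unused bam_pipe_timer_example(void *base, u32 pipe)
+{
+	bam_pipe_timer_config(base, pipe, BAM_PIPE_TIMER_PERIODIC, 0x200);
+	bam_pipe_timer_reset(base, pipe);
+}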
+
+/**
+ * Reset inactivity timer for a BAM pipe
+ *
+ */
+void bam_pipe_timer_reset(void *base, u32 pipe)
+{
+	u32 for_all_pipes = 0;
+
+#ifdef CONFIG_SPS_SUPPORT_NDP_BAM
+	for_all_pipes = bam_read_reg_field(base, REVISION, 0,
+						BAM_NUM_INACTIV_TMRS);
+#endif
+
+	if (for_all_pipes) {
+#ifdef CONFIG_SPS_SUPPORT_NDP_BAM
+		/* reset */
+		bam_write_reg_field(base, TIMER_CTRL, 0, TIMER_RST, 0);
+		/* active */
+		bam_write_reg_field(base, TIMER_CTRL, 0, TIMER_RST, 1);
+#endif
+	} else {
+		/* reset */
+		bam_write_reg_field(base, P_TIMER_CTRL, pipe, P_TIMER_RST, 0);
+		/* active */
+		bam_write_reg_field(base, P_TIMER_CTRL, pipe, P_TIMER_RST, 1);
+	}
+}
+
+/**
+ * Get inactivity timer count for a BAM pipe
+ *
+ */
+u32 bam_pipe_timer_get_count(void *base, u32 pipe)
+{
+	return bam_read_reg(base, P_TIMER, pipe);
+}
+
+/* halt and un-halt a pipe */
+void bam_pipe_halt(void *base, u32 pipe, bool halt)
+{
+	if (halt)
+		bam_write_reg_field(base, P_HALT, pipe, P_HALT_P_HALT, 1);
+	else
+		bam_write_reg_field(base, P_HALT, pipe, P_HALT_P_HALT, 0);
+}
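+
+/*
+ * Example (illustrative sketch): quiesce a pipe around an error-recovery
+ * step by halting it, performing the recovery, and un-halting it.
+ */
+static void __maybe_unused bam_pipe_halt_example(void *base, u32 pipe)
+{
+	bam_pipe_halt(base, pipe, true);
+	/* ... drain or reset the attached peripheral here ... */
+	bam_pipe_halt(base, pipe, false);
+}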
+
+/* output the content of BAM-level registers */
+void print_bam_reg(void *virt_addr)
+{
+	int i, n, index = 0;
+	u32 *bam = (u32 *) virt_addr;
+	u32 ctrl;
+	u32 ver;
+	u32 pipes;
+	u32 offset = 0;
+
+	if (bam == NULL)
+		return;
+
+#ifdef CONFIG_SPS_SUPPORT_NDP_BAM
+	if (bam_type == SPS_BAM_NDP_4K) {
+		ctrl = bam[0x0 / 4];
+		ver = bam[0x1000 / 4];
+		pipes = bam[0x1008 / 4];
+	} else {
+		ctrl = bam[0x0 / 4];
+		ver = bam[0x4 / 4];
+		pipes = bam[0x3c / 4];
+	}
+#else
+	ctrl = bam[0xf80 / 4];
+	ver = bam[0xf84 / 4];
+	pipes = bam[0xfbc / 4];
+#endif
+
+	SPS_DUMP("%s",
+		"\nsps:<bam-begin> --- Content of BAM-level registers---\n");
+
+	SPS_DUMP("BAM_CTRL: 0x%x.\n", ctrl);
+	SPS_DUMP("BAM_REVISION: 0x%x.\n", ver);
+	SPS_DUMP("NUM_PIPES: 0x%x.\n", pipes);
+
+#ifdef CONFIG_SPS_SUPPORT_NDP_BAM
+	if (bam_type == SPS_BAM_NDP_4K)
+		offset = 0x301c;
+	else
+		offset = 0x80;
+	for (i = 0x0; i < offset; i += 0x10)
+#else
+	for (i = 0xf80; i < 0x1000; i += 0x10)
+#endif
+		SPS_DUMP("bam addr 0x%x: 0x%x,0x%x,0x%x,0x%x.\n", i,
+			bam[i / 4], bam[(i / 4) + 1],
+			bam[(i / 4) + 2], bam[(i / 4) + 3]);
+
+#ifdef CONFIG_SPS_SUPPORT_NDP_BAM
+	if (bam_type == SPS_BAM_NDP_4K) {
+		offset = 0x3000;
+		index = 0x1000;
+	} else {
+		offset = 0x800;
+		index = 0x80;
+	}
+	for (i = offset, n = 0; n++ < 8; i += index)
+#else
+	for (i = 0x1800, n = 0; n++ < 4; i += 0x80)
+#endif
+		SPS_DUMP("bam addr 0x%x: 0x%x,0x%x,0x%x,0x%x.\n", i,
+			bam[i / 4], bam[(i / 4) + 1],
+			bam[(i / 4) + 2], bam[(i / 4) + 3]);
+
+	SPS_DUMP("%s",
+		"\nsps:<bam-begin> --- Content of BAM-level registers ---\n");
+}
+
+/* output the content of BAM pipe registers */
+void print_bam_pipe_reg(void *virt_addr, u32 pipe_index)
+{
+	int i;
+	u32 *bam = (u32 *) virt_addr;
+	u32 pipe = pipe_index;
+	u32 offset = 0;
+
+	if (bam == NULL)
+		return;
+
+	SPS_DUMP("\nsps:<pipe-begin> --- Content of Pipe %d registers ---\n",
+			pipe);
+
+	SPS_DUMP("%s", "-- Pipe Management Registers --\n");
+
+#ifdef CONFIG_SPS_SUPPORT_NDP_BAM
+	if (bam_type == SPS_BAM_NDP_4K)
+		offset = 0x13000;
+	else
+		offset = 0x1000;
+	for (i = offset + 0x1000 * pipe; i < offset + 0x1000 * pipe + 0x80;
+	    i += 0x10)
+#else
+	for (i = 0x0000 + 0x80 * pipe; i < 0x0000 + 0x80 * (pipe + 1);
+	    i += 0x10)
+#endif
+		SPS_DUMP("bam addr 0x%x: 0x%x,0x%x,0x%x,0x%x.\n", i,
+			bam[i / 4], bam[(i / 4) + 1],
+			bam[(i / 4) + 2], bam[(i / 4) + 3]);
+
+	SPS_DUMP("%s",
+		"-- Pipe Configuration and Internal State Registers --\n");
+
+#ifdef CONFIG_SPS_SUPPORT_NDP_BAM
+	if (bam_type == SPS_BAM_NDP_4K)
+		offset = 0x13800;
+	else
+		offset = 0x1800;
+	for (i = offset + 0x1000 * pipe; i < offset + 0x1000 * pipe + 0x40;
+		i += 0x10)
+#else
+	for (i = 0x1000 + 0x40 * pipe; i < 0x1000 + 0x40 * (pipe + 1);
+	    i += 0x10)
+#endif
+		SPS_DUMP("bam addr 0x%x: 0x%x,0x%x,0x%x,0x%x.\n", i,
+			bam[i / 4], bam[(i / 4) + 1],
+			bam[(i / 4) + 2], bam[(i / 4) + 3]);
+
+	SPS_DUMP("\nsps:<pipe-end> --- Content of Pipe %d registers ---\n",
+			pipe);
+}
+
+/* output the content of selected BAM-level registers */
+void print_bam_selected_reg(void *virt_addr, u32 ee)
+{
+	void *base = virt_addr;
+
+	u32 bam_ctrl;
+	u32 bam_revision;
+	u32 bam_rev_num;
+	u32 bam_rev_ee_num;
+
+	u32 bam_num_pipes;
+	u32 bam_pipe_num;
+	u32 bam_data_addr_bus_width;
+
+	u32 bam_desc_cnt_trshld;
+	u32 bam_desc_cnt_trd_val;
+
+	u32 bam_irq_en;
+	u32 bam_irq_stts;
+
+	u32 bam_irq_src_ee = 0;
+	u32 bam_irq_msk_ee = 0;
+	u32 bam_irq_unmsk_ee = 0;
+	u32 bam_pipe_attr_ee = 0;
+
+	u32 bam_ahb_err_ctrl;
+	u32 bam_ahb_err_addr;
+	u32 bam_ahb_err_data;
+	u32 bam_cnfg_bits;
+
+	u32 bam_sw_rev = 0;
+	u32 bam_timer = 0;
+	u32 bam_timer_ctrl = 0;
+	u32 bam_ahb_err_addr_msb = 0;
+
+	if (base == NULL)
+		return;
+
+	bam_ctrl = bam_read_reg(base, CTRL, 0);
+	bam_revision = bam_read_reg(base, REVISION, 0);
+	bam_rev_num = bam_read_reg_field(base, REVISION, 0, BAM_REVISION);
+	bam_rev_ee_num = bam_read_reg_field(base, REVISION, 0, BAM_NUM_EES);
+
+	bam_num_pipes = bam_read_reg(base, NUM_PIPES, 0);
+	bam_pipe_num = bam_read_reg_field(base, NUM_PIPES, 0, BAM_NUM_PIPES);
+	bam_data_addr_bus_width = bam_read_reg_field(base, NUM_PIPES, 0,
+					BAM_DATA_ADDR_BUS_WIDTH);
+
+	bam_desc_cnt_trshld = bam_read_reg(base, DESC_CNT_TRSHLD, 0);
+	bam_desc_cnt_trd_val = bam_read_reg_field(base, DESC_CNT_TRSHLD, 0,
+					BAM_DESC_CNT_TRSHLD);
+
+	bam_irq_en = bam_read_reg(base, IRQ_EN, 0);
+	bam_irq_stts = bam_read_reg(base, IRQ_STTS, 0);
+
+	if (ee < BAM_MAX_EES) {
+		bam_irq_src_ee = bam_read_reg(base, IRQ_SRCS_EE, ee);
+		bam_irq_msk_ee = bam_read_reg(base, IRQ_SRCS_MSK_EE, ee);
+		bam_irq_unmsk_ee = bam_read_reg(base, IRQ_SRCS_UNMASKED_EE, ee);
+	}
+
+	bam_ahb_err_ctrl = bam_read_reg(base, AHB_MASTER_ERR_CTRLS, 0);
+	bam_ahb_err_addr = bam_read_reg(base, AHB_MASTER_ERR_ADDR, 0);
+	bam_ahb_err_data = bam_read_reg(base, AHB_MASTER_ERR_DATA, 0);
+	bam_cnfg_bits = bam_read_reg(base, CNFG_BITS, 0);
+
+#ifdef CONFIG_SPS_SUPPORT_NDP_BAM
+	bam_sw_rev = bam_read_reg(base, SW_REVISION, 0);
+	bam_timer = bam_read_reg(base, TIMER, 0);
+	bam_timer_ctrl = bam_read_reg(base, TIMER_CTRL, 0);
+	bam_ahb_err_addr_msb = SPS_LPAE ?
+		bam_read_reg(base, AHB_MASTER_ERR_ADDR_MSB, 0) : 0;
+	if (ee < BAM_MAX_EES)
+		bam_pipe_attr_ee = enhd_pipe ?
+			bam_read_reg(base, PIPE_ATTR_EE, ee) : 0x0;
+#endif
+
+	SPS_DUMP("%s", "\nsps:<bam-begin> --- BAM-level registers ---\n\n");
+
+	SPS_DUMP("BAM_CTRL: 0x%x\n", bam_ctrl);
+	SPS_DUMP("BAM_REVISION: 0x%x\n", bam_revision);
+	SPS_DUMP("    REVISION: 0x%x\n", bam_rev_num);
+	SPS_DUMP("    NUM_EES: %d\n", bam_rev_ee_num);
+	SPS_DUMP("BAM_SW_REVISION: 0x%x\n", bam_sw_rev);
+	SPS_DUMP("BAM_NUM_PIPES: %d\n", bam_num_pipes);
+	SPS_DUMP("BAM_DATA_ADDR_BUS_WIDTH: %d\n",
+			((bam_data_addr_bus_width == 0x0) ? 32 : 36));
+	SPS_DUMP("    NUM_PIPES: %d\n", bam_pipe_num);
+	SPS_DUMP("BAM_DESC_CNT_TRSHLD: 0x%x\n", bam_desc_cnt_trshld);
+	SPS_DUMP("    DESC_CNT_TRSHLD: 0x%x (%d)\n", bam_desc_cnt_trd_val,
+			bam_desc_cnt_trd_val);
+
+	SPS_DUMP("BAM_IRQ_EN: 0x%x\n", bam_irq_en);
+	SPS_DUMP("BAM_IRQ_STTS: 0x%x\n", bam_irq_stts);
+
+	if (ee < BAM_MAX_EES) {
+		SPS_DUMP("BAM_IRQ_SRCS_EE(%d): 0x%x\n", ee, bam_irq_src_ee);
+		SPS_DUMP("BAM_IRQ_SRCS_MSK_EE(%d): 0x%x\n", ee, bam_irq_msk_ee);
+		SPS_DUMP("BAM_IRQ_SRCS_UNMASKED_EE(%d): 0x%x\n", ee,
+				bam_irq_unmsk_ee);
+		SPS_DUMP("BAM_PIPE_ATTR_EE(%d): 0x%x\n", ee, bam_pipe_attr_ee);
+	}
+
+	SPS_DUMP("BAM_AHB_MASTER_ERR_CTRLS: 0x%x\n", bam_ahb_err_ctrl);
+	SPS_DUMP("BAM_AHB_MASTER_ERR_ADDR: 0x%x\n", bam_ahb_err_addr);
+	SPS_DUMP("BAM_AHB_MASTER_ERR_ADDR_MSB: 0x%x\n", bam_ahb_err_addr_msb);
+	SPS_DUMP("BAM_AHB_MASTER_ERR_DATA: 0x%x\n", bam_ahb_err_data);
+
+	SPS_DUMP("BAM_CNFG_BITS: 0x%x\n", bam_cnfg_bits);
+	SPS_DUMP("BAM_TIMER: 0x%x\n", bam_timer);
+	SPS_DUMP("BAM_TIMER_CTRL: 0x%x\n", bam_timer_ctrl);
+
+	SPS_DUMP("%s", "\nsps:<bam-end> --- BAM-level registers ---\n\n");
+}
+
+/* output the content of selected BAM pipe registers */
+void print_bam_pipe_selected_reg(void *virt_addr, u32 pipe_index)
+{
+	void *base = virt_addr;
+	u32 pipe = pipe_index;
+
+	u32 p_ctrl;
+	u32 p_sys_mode;
+	u32 p_direction;
+	u32 p_lock_group = 0;
+
+	u32 p_irq_en;
+	u32 p_irq_stts;
+	u32 p_irq_stts_eot;
+	u32 p_irq_stts_int;
+
+	u32 p_prd_sdbd;
+	u32 p_bytes_free;
+	u32 p_prd_ctrl;
+	u32 p_prd_toggle;
+	u32 p_prd_sb_updated;
+
+	u32 p_con_sdbd;
+	u32 p_bytes_avail;
+	u32 p_con_ctrl;
+	u32 p_con_toggle;
+	u32 p_con_ack_toggle;
+	u32 p_con_ack_toggle_r;
+	u32 p_con_wait_4_ack;
+	u32 p_con_sb_updated;
+
+	u32 p_sw_offset;
+	u32 p_read_pointer;
+	u32 p_evnt_reg;
+	u32 p_write_pointer;
+
+	u32 p_evnt_dest;
+	u32 p_evnt_dest_msb = 0;
+	u32 p_desc_fifo_addr;
+	u32 p_desc_fifo_addr_msb = 0;
+	u32 p_desc_fifo_size;
+	u32 p_data_fifo_addr;
+	u32 p_data_fifo_addr_msb = 0;
+	u32 p_data_fifo_size;
+	u32 p_fifo_sizes;
+
+	u32 p_evnt_trd;
+	u32 p_evnt_trd_val;
+
+	u32 p_retr_ct;
+	u32 p_retr_offset;
+	u32 p_si_ct;
+	u32 p_si_offset;
+	u32 p_df_ct = 0;
+	u32 p_df_offset = 0;
+	u32 p_au_ct1;
+	u32 p_psm_ct2;
+	u32 p_psm_ct3;
+	u32 p_psm_ct3_msb = 0;
+	u32 p_psm_ct4;
+	u32 p_psm_ct5;
+
+	u32 p_timer;
+	u32 p_timer_ctrl;
+
+	if (base == NULL)
+		return;
+
+	p_ctrl = bam_read_reg(base, P_CTRL, pipe);
+	p_sys_mode = bam_read_reg_field(base, P_CTRL, pipe, P_SYS_MODE);
+	p_direction = bam_read_reg_field(base, P_CTRL, pipe, P_DIRECTION);
+
+	p_irq_en = bam_read_reg(base, P_IRQ_EN, pipe);
+	p_irq_stts = bam_read_reg(base, P_IRQ_STTS, pipe);
+	p_irq_stts_eot = bam_read_reg_field(base, P_IRQ_STTS, pipe,
+					P_IRQ_STTS_P_TRNSFR_END_IRQ);
+	p_irq_stts_int = bam_read_reg_field(base, P_IRQ_STTS, pipe,
+					P_IRQ_STTS_P_PRCSD_DESC_IRQ);
+
+	p_prd_sdbd = bam_read_reg(base, P_PRDCR_SDBND, pipe);
+	p_bytes_free = bam_read_reg_field(base, P_PRDCR_SDBND, pipe,
+					P_PRDCR_SDBNDn_BAM_P_BYTES_FREE);
+	p_prd_ctrl = bam_read_reg_field(base, P_PRDCR_SDBND, pipe,
+					P_PRDCR_SDBNDn_BAM_P_CTRL);
+	p_prd_toggle = bam_read_reg_field(base, P_PRDCR_SDBND, pipe,
+					P_PRDCR_SDBNDn_BAM_P_TOGGLE);
+	p_prd_sb_updated = bam_read_reg_field(base, P_PRDCR_SDBND, pipe,
+					P_PRDCR_SDBNDn_BAM_P_SB_UPDATED);
+	p_con_sdbd = bam_read_reg(base, P_CNSMR_SDBND, pipe);
+	p_bytes_avail = bam_read_reg_field(base, P_CNSMR_SDBND, pipe,
+					P_CNSMR_SDBNDn_BAM_P_BYTES_AVAIL);
+	p_con_ctrl = bam_read_reg_field(base, P_CNSMR_SDBND, pipe,
+					P_CNSMR_SDBNDn_BAM_P_CTRL);
+	p_con_toggle = bam_read_reg_field(base, P_CNSMR_SDBND, pipe,
+					P_CNSMR_SDBNDn_BAM_P_TOGGLE);
+	p_con_ack_toggle = bam_read_reg_field(base, P_CNSMR_SDBND, pipe,
+					P_CNSMR_SDBNDn_BAM_P_ACK_TOGGLE);
+	p_con_ack_toggle_r = bam_read_reg_field(base, P_CNSMR_SDBND, pipe,
+					P_CNSMR_SDBNDn_BAM_P_ACK_TOGGLE_R);
+	p_con_wait_4_ack = bam_read_reg_field(base, P_CNSMR_SDBND, pipe,
+					P_CNSMR_SDBNDn_BAM_P_WAIT_4_ACK);
+	p_con_sb_updated = bam_read_reg_field(base, P_CNSMR_SDBND, pipe,
+					P_CNSMR_SDBNDn_BAM_P_SB_UPDATED);
+
+	p_sw_offset = bam_read_reg(base, P_SW_OFSTS, pipe);
+	p_read_pointer = bam_read_reg_field(base, P_SW_OFSTS, pipe,
+						SW_DESC_OFST);
+	p_evnt_reg = bam_read_reg(base, P_EVNT_REG, pipe);
+	p_write_pointer = bam_read_reg_field(base, P_EVNT_REG, pipe,
+						P_DESC_FIFO_PEER_OFST);
+
+	p_evnt_dest = bam_read_reg(base, P_EVNT_DEST_ADDR, pipe);
+	p_desc_fifo_addr = bam_read_reg(base, P_DESC_FIFO_ADDR, pipe);
+	p_desc_fifo_size = bam_read_reg_field(base, P_FIFO_SIZES, pipe,
+						P_DESC_FIFO_SIZE);
+	p_data_fifo_addr = bam_read_reg(base, P_DATA_FIFO_ADDR, pipe);
+	p_data_fifo_size = bam_read_reg_field(base, P_FIFO_SIZES, pipe,
+						P_DATA_FIFO_SIZE);
+	p_fifo_sizes = bam_read_reg(base, P_FIFO_SIZES, pipe);
+
+	p_evnt_trd = bam_read_reg(base, P_EVNT_GEN_TRSHLD, pipe);
+	p_evnt_trd_val = bam_read_reg_field(base, P_EVNT_GEN_TRSHLD, pipe,
+					P_EVNT_GEN_TRSHLD_P_TRSHLD);
+
+	p_retr_ct = bam_read_reg(base, P_RETR_CNTXT, pipe);
+	p_retr_offset = bam_read_reg_field(base, P_RETR_CNTXT, pipe,
+					P_RETR_CNTXT_RETR_DESC_OFST);
+	p_si_ct = bam_read_reg(base, P_SI_CNTXT, pipe);
+	p_si_offset = bam_read_reg_field(base, P_SI_CNTXT, pipe,
+					P_SI_CNTXT_SI_DESC_OFST);
+	p_au_ct1 = bam_read_reg(base, P_AU_PSM_CNTXT_1, pipe);
+	p_psm_ct2 = bam_read_reg(base, P_PSM_CNTXT_2, pipe);
+	p_psm_ct3 = bam_read_reg(base, P_PSM_CNTXT_3, pipe);
+	p_psm_ct4 = bam_read_reg(base, P_PSM_CNTXT_4, pipe);
+	p_psm_ct5 = bam_read_reg(base, P_PSM_CNTXT_5, pipe);
+
+	p_timer = bam_read_reg(base, P_TIMER, pipe);
+	p_timer_ctrl = bam_read_reg(base, P_TIMER_CTRL, pipe);
+
+#ifdef CONFIG_SPS_SUPPORT_NDP_BAM
+	p_evnt_dest_msb = SPS_LPAE ?
+		bam_read_reg(base, P_EVNT_DEST_ADDR_MSB, pipe) : 0;
+
+	p_desc_fifo_addr_msb = SPS_LPAE ?
+		bam_read_reg(base, P_DESC_FIFO_ADDR_MSB, pipe) : 0;
+	p_data_fifo_addr_msb = SPS_LPAE ?
+		bam_read_reg(base, P_DATA_FIFO_ADDR_MSB, pipe) : 0;
+
+	p_psm_ct3_msb = SPS_LPAE ? bam_read_reg(base, P_PSM_CNTXT_3, pipe) : 0;
+	p_lock_group = bam_read_reg_field(base, P_CTRL, pipe, P_LOCK_GROUP);
+	p_df_ct = bam_read_reg(base, P_DF_CNTXT, pipe);
+	p_df_offset = bam_read_reg_field(base, P_DF_CNTXT, pipe,
+					P_DF_CNTXT_DF_DESC_OFST);
+#endif
+
+	SPS_DUMP("\nsps:<pipe-begin> --- Registers of Pipe %d ---\n\n", pipe);
+
+	SPS_DUMP("BAM_P_CTRL: 0x%x\n", p_ctrl);
+	SPS_DUMP("    SYS_MODE: %d\n", p_sys_mode);
+	if (p_direction)
+		SPS_DUMP("    DIRECTION:%d->Producer\n", p_direction);
+	else
+		SPS_DUMP("    DIRECTION:%d->Consumer\n", p_direction);
+	SPS_DUMP("    LOCK_GROUP: 0x%x (%d)\n", p_lock_group, p_lock_group);
+
+	SPS_DUMP("BAM_P_IRQ_EN: 0x%x\n", p_irq_en);
+	SPS_DUMP("BAM_P_IRQ_STTS: 0x%x\n", p_irq_stts);
+	SPS_DUMP("    TRNSFR_END_IRQ(EOT): 0x%x\n", p_irq_stts_eot);
+	SPS_DUMP("    PRCSD_DESC_IRQ(INT): 0x%x\n", p_irq_stts_int);
+
+	SPS_DUMP("BAM_P_PRDCR_SDBND: 0x%x\n", p_prd_sdbd);
+	SPS_DUMP("    BYTES_FREE: 0x%x (%d)\n", p_bytes_free, p_bytes_free);
+	SPS_DUMP("    CTRL: 0x%x\n", p_prd_ctrl);
+	SPS_DUMP("    TOGGLE: %d\n", p_prd_toggle);
+	SPS_DUMP("    SB_UPDATED: %d\n", p_prd_sb_updated);
+	SPS_DUMP("BAM_P_CNSMR_SDBND: 0x%x\n", p_con_sdbd);
+	SPS_DUMP("    WAIT_4_ACK: %d\n", p_con_wait_4_ack);
+	SPS_DUMP("    BYTES_AVAIL: 0x%x (%d)\n", p_bytes_avail, p_bytes_avail);
+	SPS_DUMP("    CTRL: 0x%x\n", p_con_ctrl);
+	SPS_DUMP("    TOGGLE: %d\n", p_con_toggle);
+	SPS_DUMP("    ACK_TOGGLE: %d\n", p_con_ack_toggle);
+	SPS_DUMP("    ACK_TOGGLE_R: %d\n", p_con_ack_toggle_r);
+	SPS_DUMP("    SB_UPDATED: %d\n", p_con_sb_updated);
+
+	SPS_DUMP("BAM_P_SW_DESC_OFST: 0x%x\n", p_sw_offset);
+	SPS_DUMP("    SW_DESC_OFST: 0x%x\n", p_read_pointer);
+	SPS_DUMP("BAM_P_EVNT_REG: 0x%x\n", p_evnt_reg);
+	SPS_DUMP("    DESC_FIFO_PEER_OFST: 0x%x\n", p_write_pointer);
+
+	SPS_DUMP("BAM_P_RETR_CNTXT: 0x%x\n", p_retr_ct);
+	SPS_DUMP("    RETR_OFFSET: 0x%x\n", p_retr_offset);
+	SPS_DUMP("BAM_P_SI_CNTXT: 0x%x\n", p_si_ct);
+	SPS_DUMP("    SI_OFFSET: 0x%x\n", p_si_offset);
+	SPS_DUMP("BAM_P_DF_CNTXT: 0x%x\n", p_df_ct);
+	SPS_DUMP("    DF_OFFSET: 0x%x\n", p_df_offset);
+
+	SPS_DUMP("BAM_P_DESC_FIFO_ADDR: 0x%x\n", p_desc_fifo_addr);
+	SPS_DUMP("BAM_P_DESC_FIFO_ADDR_MSB: 0x%x\n", p_desc_fifo_addr_msb);
+	SPS_DUMP("BAM_P_DATA_FIFO_ADDR: 0x%x\n", p_data_fifo_addr);
+	SPS_DUMP("BAM_P_DATA_FIFO_ADDR_MSB: 0x%x\n", p_data_fifo_addr_msb);
+	SPS_DUMP("BAM_P_FIFO_SIZES: 0x%x\n", p_fifo_sizes);
+	SPS_DUMP("    DESC_FIFO_SIZE: 0x%x (%d)\n", p_desc_fifo_size,
+							p_desc_fifo_size);
+	SPS_DUMP("    DATA_FIFO_SIZE: 0x%x (%d)\n", p_data_fifo_size,
+							p_data_fifo_size);
+
+	SPS_DUMP("BAM_P_EVNT_DEST_ADDR: 0x%x\n", p_evnt_dest);
+	SPS_DUMP("BAM_P_EVNT_DEST_ADDR_MSB: 0x%x\n", p_evnt_dest_msb);
+	SPS_DUMP("BAM_P_EVNT_GEN_TRSHLD: 0x%x\n", p_evnt_trd);
+	SPS_DUMP("    EVNT_GEN_TRSHLD: 0x%x (%d)\n", p_evnt_trd_val,
+							p_evnt_trd_val);
+
+	SPS_DUMP("BAM_P_AU_PSM_CNTXT_1: 0x%x\n", p_au_ct1);
+	SPS_DUMP("BAM_P_PSM_CNTXT_2: 0x%x\n", p_psm_ct2);
+	SPS_DUMP("BAM_P_PSM_CNTXT_3: 0x%x\n", p_psm_ct3);
+	SPS_DUMP("BAM_P_PSM_CNTXT_3_MSB: 0x%x\n", p_psm_ct3_msb);
+	SPS_DUMP("BAM_P_PSM_CNTXT_4: 0x%x\n", p_psm_ct4);
+	SPS_DUMP("BAM_P_PSM_CNTXT_5: 0x%x\n", p_psm_ct5);
+	SPS_DUMP("BAM_P_TIMER: 0x%x\n", p_timer);
+	SPS_DUMP("BAM_P_TIMER_CTRL: 0x%x\n", p_timer_ctrl);
+
+	SPS_DUMP("\nsps:<pipe-end> --- Registers of Pipe %d ---\n\n", pipe);
+}
+
+/* output descriptor FIFO of a pipe */
+void print_bam_pipe_desc_fifo(void *virt_addr, u32 pipe_index, u32 option)
+{
+	void *base = virt_addr;
+	u32 pipe = pipe_index;
+	u32 desc_fifo_addr;
+	u32 desc_fifo_size;
+	u32 *desc_fifo;
+	int i;
+	char desc_info[MAX_MSG_LEN];
+
+	if (base == NULL)
+		return;
+
+	desc_fifo_addr = bam_read_reg(base, P_DESC_FIFO_ADDR, pipe);
+	desc_fifo_size = bam_read_reg_field(base, P_FIFO_SIZES, pipe,
+						P_DESC_FIFO_SIZE);
+
+	if (desc_fifo_addr == 0) {
+		SPS_ERR(sps, "sps:%s:desc FIFO address of Pipe %d is NULL.\n",
+			__func__, pipe);
+		return;
+	} else if (desc_fifo_size == 0) {
+		SPS_ERR(sps, "sps:%s:desc FIFO size of Pipe %d is 0.\n",
+			__func__, pipe);
+		return;
+	}
+
+	SPS_DUMP("\nsps:<desc-begin> --- descriptor FIFO of Pipe %d -----\n\n",
+			pipe);
+
+	SPS_DUMP("BAM_P_DESC_FIFO_ADDR: 0x%x\n"
+		"BAM_P_DESC_FIFO_SIZE: 0x%x (%d)\n\n",
+		desc_fifo_addr, desc_fifo_size, desc_fifo_size);
+
+	desc_fifo = (u32 *) phys_to_virt(desc_fifo_addr);
+
+	if (option == 100) {
+		SPS_DUMP("%s",
+			"----- start of data blocks -----\n");
+		for (i = 0; i < desc_fifo_size; i += 8) {
+			u32 *data_block_vir;
+			u32 data_block_phy = desc_fifo[i / 4];
+
+			if (data_block_phy) {
+				data_block_vir =
+					(u32 *) phys_to_virt(data_block_phy);
+
+				SPS_DUMP("desc addr:0x%x; data addr:0x%x:\n",
+					desc_fifo_addr + i, data_block_phy);
+				SPS_DUMP("0x%x, 0x%x, 0x%x, 0x%x\n",
+					data_block_vir[0], data_block_vir[1],
+					data_block_vir[2], data_block_vir[3]);
+				SPS_DUMP("0x%x, 0x%x, 0x%x, 0x%x\n",
+					data_block_vir[4], data_block_vir[5],
+					data_block_vir[6], data_block_vir[7]);
+				SPS_DUMP("0x%x, 0x%x, 0x%x, 0x%x\n",
+					data_block_vir[8], data_block_vir[9],
+					data_block_vir[10], data_block_vir[11]);
+				SPS_DUMP("0x%x, 0x%x, 0x%x, 0x%x\n\n",
+					data_block_vir[12], data_block_vir[13],
+					data_block_vir[14], data_block_vir[15]);
+			}
+		}
+		SPS_DUMP("%s",
+			"----- end of data blocks -----\n");
+	} else if (option) {
+		u32 size = option * 128;
+		u32 current_desc = bam_pipe_get_desc_read_offset(base,
+								pipe_index);
+		u32 begin = 0;
+		u32 end = desc_fifo_size;
+
+		if (current_desc > size / 2)
+			begin = current_desc - size / 2;
+
+		if (desc_fifo_size > current_desc + size / 2)
+			end = current_desc + size / 2;
+
+		SPS_DUMP("%s",
+			"------------ begin of partial FIFO ------------\n\n");
+
+		SPS_DUMP("%s",
+			"desc addr; desc content; desc flags\n");
+		for (i = begin; i < end; i += 0x8) {
+			u32 offset;
+			u32 flags = desc_fifo[(i / 4) + 1] >> 16;
+
+			memset(desc_info, 0, sizeof(desc_info));
+			offset = scnprintf(desc_info, 40, "0x%x: 0x%x, 0x%x: ",
+				desc_fifo_addr + i,
+				desc_fifo[i / 4], desc_fifo[(i / 4) + 1]);
+
+			if (flags & SPS_IOVEC_FLAG_INT)
+				offset += scnprintf(desc_info + offset, 5,
+							"INT ");
+			if (flags & SPS_IOVEC_FLAG_EOT)
+				offset += scnprintf(desc_info + offset, 5,
+							"EOT ");
+			if (flags & SPS_IOVEC_FLAG_EOB)
+				offset += scnprintf(desc_info + offset, 5,
+							"EOB ");
+			if (flags & SPS_IOVEC_FLAG_NWD)
+				offset += scnprintf(desc_info + offset, 5,
+							"NWD ");
+			if (flags & SPS_IOVEC_FLAG_CMD)
+				offset += scnprintf(desc_info + offset, 5,
+							"CMD ");
+			if (flags & SPS_IOVEC_FLAG_LOCK)
+				offset += scnprintf(desc_info + offset, 5,
+							"LCK ");
+			if (flags & SPS_IOVEC_FLAG_UNLOCK)
+				offset += scnprintf(desc_info + offset, 5,
+							"UNL ");
+			if (flags & SPS_IOVEC_FLAG_IMME)
+				offset += scnprintf(desc_info + offset, 5,
+							"IMM ");
+
+			SPS_DUMP("%s\n", desc_info);
+		}
+
+		SPS_DUMP("%s",
+			"\n------------  end of partial FIFO  ------------\n");
+	} else {
+		SPS_DUMP("%s",
+			"---------------- begin of FIFO ----------------\n\n");
+
+		for (i = 0; i < desc_fifo_size; i += 0x10)
+			SPS_DUMP("addr 0x%x: 0x%x, 0x%x, 0x%x, 0x%x.\n",
+				desc_fifo_addr + i,
+				desc_fifo[i / 4], desc_fifo[(i / 4) + 1],
+				desc_fifo[(i / 4) + 2], desc_fifo[(i / 4) + 3]);
+
+		SPS_DUMP("%s",
+			"\n----------------  end of FIFO  ----------------\n");
+	}
+
+	SPS_DUMP("\nsps:<desc-end> --- descriptor FIFO of Pipe %d -----\n\n",
+			pipe);
+}
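+
+/*
+ * Example (illustrative sketch): the three dump granularities decoded
+ * above -- option 0 prints the whole FIFO, a small N prints an N * 128
+ * byte window around the current descriptor, and 100 prints the first 16
+ * words of each descriptor's data block.
+ */
+static void __maybe_unused bam_pipe_desc_dump_example(void *base, u32 pipe)
+{
+	print_bam_pipe_desc_fifo(base, pipe, 0);	/* full FIFO */
+	print_bam_pipe_desc_fifo(base, pipe, 2);	/* 256-byte window */
+	print_bam_pipe_desc_fifo(base, pipe, 100);	/* data blocks */
+}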
+
+/* output BAM_TEST_BUS_REG with specified TEST_BUS_SEL */
+void print_bam_test_bus_reg(void *base, u32 tb_sel)
+{
+	u32 i;
+	u32 test_bus_selection[] = {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
+			0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf,
+			0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+			0x20, 0x21, 0x22, 0x23,
+			0x41, 0x42, 0x43, 0x44, 0x45, 0x46};
+	u32 size = ARRAY_SIZE(test_bus_selection);
+
+	if (base == NULL) {
+		SPS_ERR(sps, "sps:%s:BAM is NULL.\n", __func__);
+		return;
+	}
+
+	if (tb_sel) {
+		SPS_DUMP("\nsps:Specified TEST_BUS_SEL value: 0x%x\n", tb_sel);
+		bam_write_reg_field(base, TEST_BUS_SEL, 0, BAM_TESTBUS_SEL,
+					tb_sel);
+		SPS_DUMP("sps:BAM_TEST_BUS_REG:0x%x for TEST_BUS_SEL:0x%x\n\n",
+			bam_read_reg(base, TEST_BUS_REG, 0),
+			bam_read_reg_field(base, TEST_BUS_SEL, 0,
+						BAM_TESTBUS_SEL));
+	}
+
+	SPS_DUMP("%s", "\nsps:<testbus-begin> --- BAM TEST_BUS dump -----\n\n");
+
+	/* output other selections */
+	for (i = 0; i < size; i++) {
+		bam_write_reg_field(base, TEST_BUS_SEL, 0, BAM_TESTBUS_SEL,
+					test_bus_selection[i]);
+
+		SPS_DUMP("sps:TEST_BUS_REG:0x%x\t  TEST_BUS_SEL:0x%x\n",
+			bam_read_reg(base, TEST_BUS_REG, 0),
+			bam_read_reg_field(base, TEST_BUS_SEL, 0,
+					BAM_TESTBUS_SEL));
+	}
+
+	SPS_DUMP("%s", "\nsps:<testbus-end> --- BAM TEST_BUS dump -----\n\n");
+}
diff --git a/drivers/platform/msm/sps/bam.h b/drivers/platform/msm/sps/bam.h
new file mode 100644
index 0000000..1d8aab0
--- /dev/null
+++ b/drivers/platform/msm/sps/bam.h
@@ -0,0 +1,447 @@
+/* Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/* Bus-Access-Manager (BAM) Hardware manager functions API. */
+
+#ifndef _BAM_H_
+#define _BAM_H_
+
+#include <linux/types.h>	/* u32 */
+#include <linux/io.h>		/* ioread32() */
+#include <linux/bitops.h>	/* find_first_bit() */
+#include "spsi.h"
+
+/* Pipe mode */
+enum bam_pipe_mode {
+	BAM_PIPE_MODE_BAM2BAM = 0,	/* BAM to BAM */
+	BAM_PIPE_MODE_SYSTEM = 1,	/* BAM to/from System Memory */
+};
+
+/* Pipe direction */
+enum bam_pipe_dir {
+	/* The Pipe Reads data from data-fifo or system-memory */
+	BAM_PIPE_CONSUMER = 0,
+	/* The Pipe Writes data to data-fifo or system-memory */
+	BAM_PIPE_PRODUCER = 1,
+};
+
+/* Stream mode Type */
+enum bam_stream_mode {
+	BAM_STREAM_MODE_DISABLE = 0,
+	BAM_STREAM_MODE_ENABLE = 1,
+};
+
+/* NWD written Type */
+enum bam_write_nwd {
+	BAM_WRITE_NWD_DISABLE = 0,
+	BAM_WRITE_NWD_ENABLE = 1,
+};
+
+/* Enable Type */
+enum bam_enable {
+	BAM_DISABLE = 0,
+	BAM_ENABLE = 1,
+};
+
+/* Pipe timer mode */
+enum bam_pipe_timer_mode {
+	BAM_PIPE_TIMER_ONESHOT = 0,
+	BAM_PIPE_TIMER_PERIODIC = 1,
+};
+
+struct transfer_descriptor {
+	u32 addr;	/* Buffer physical address */
+	u32 size:16;	/* Buffer size in bytes */
+	u32 flags:16;	/* Flag bitmask (see SPS_IOVEC_FLAG_ #defines) */
+}  __packed;
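+
+/*
+ * Example (illustrative sketch): one system-mode descriptor for a 512-byte
+ * buffer at a hypothetical physical address, flagged to raise EOT on
+ * completion. SPS_IOVEC_FLAG_EOT comes from the SPS client API and fits
+ * in the 16-bit flags field above.
+ */
+static inline void transfer_descriptor_example(struct transfer_descriptor *td,
+						u32 buf_phys)
+{
+	td->addr = buf_phys;
+	td->size = 512;
+	td->flags = SPS_IOVEC_FLAG_EOT;
+}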
+
+/* BAM pipe initialization parameters */
+struct bam_pipe_parameters {
+	u16 event_threshold;
+	u32 pipe_irq_mask;
+	enum bam_pipe_dir dir;
+	enum bam_pipe_mode mode;
+	enum bam_write_nwd write_nwd;
+	phys_addr_t desc_base;	/* Physical address of descriptor FIFO */
+	u32 desc_size;	/* Size (bytes) of descriptor FIFO */
+	u32 lock_group;	/* The lock group this pipe belongs to */
+	enum bam_stream_mode stream_mode;
+	u32 ee;		/* BAM execution environment index */
+
+	/* The following are only valid if mode is BAM2BAM */
+	u32 peer_phys_addr;
+	u32 peer_pipe;
+	phys_addr_t data_base;	/* Physical address of data FIFO */
+	u32 data_size;	/* Size (bytes) of data FIFO */
+};
+
+/**
+ * Initialize a BAM device
+ *
+ * This function initializes a BAM device.
+ *
+ * @base - BAM virtual base address.
+ *
+ * @ee - BAM execution environment index
+ *
+ * @summing_threshold - summing threshold (global for all pipes)
+ *
+ * @irq_mask - error interrupts mask
+ *
+ * @version - return BAM hardware version
+ *
+ * @num_pipes - return number of pipes
+ *
+ * @options - BAM configuration options
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int bam_init(void *base,
+		u32 ee,
+		u16 summing_threshold,
+		u32 irq_mask, u32 *version,
+		u32 *num_pipes, u32 options);
+
+/**
+ * Initialize BAM device security execution environment
+ *
+ * @base - BAM virtual base address.
+ *
+ * @ee - BAM execution environment index
+ *
+ * @vmid - virtual master identifier
+ *
+ * @pipe_mask - bit mask of pipes to assign to EE
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int bam_security_init(void *base, u32 ee, u32 vmid, u32 pipe_mask);
+
+/**
+ * Check a BAM device
+ *
+ * This function verifies that a BAM device is enabled and gathers
+ *    the hardware configuration.
+ *
+ * @base - BAM virtual base address.
+ *
+ * @version - return BAM hardware version
+ *
+ * @ee - BAM execution environment index
+ *
+ * @num_pipes - return number of pipes
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int bam_check(void *base, u32 *version, u32 ee, u32 *num_pipes);
+
+/**
+ * Disable a BAM device
+ *
+ * This function disables a BAM device.
+ *
+ * @base - BAM virtual base address.
+ *
+ * @ee - BAM execution environment index
+ *
+ */
+void bam_exit(void *base, u32 ee);
+
+/**
+ * This function prints BAM register content
+ * including TEST_BUS and PIPE register content.
+ *
+ * @base - BAM virtual base address.
+ *
+ * @ee - BAM execution environment index
+ */
+void bam_output_register_content(void *base, u32 ee);
+
+/**
+ * Get BAM IRQ source and clear global IRQ status
+ *
+ * This function gets BAM IRQ source.
+ * Clear global IRQ status if it is non-zero.
+ *
+ * @base - BAM virtual base address.
+ *
+ * @ee - BAM execution environment index
+ *
+ * @mask - active pipes mask.
+ *
+ * @cb_case - callback case.
+ *
+ * @return IRQ status
+ *
+ */
+u32 bam_check_irq_source(void *base, u32 ee, u32 mask,
+				enum sps_callback_case *cb_case);
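+
+/*
+ * Example (illustrative sketch): typical top-half use -- fetch and clear
+ * the global IRQ status for EE 0 across all pipes, then service each pipe
+ * whose bit is set in the returned status.
+ */
+static inline u32 bam_check_irq_example(void *base)
+{
+	enum sps_callback_case cb_case;
+
+	return bam_check_irq_source(base, 0, 0xffffffff, &cb_case);
+}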
+
+/**
+ * Initialize a BAM pipe
+ *
+ * This function initializes a BAM pipe.
+ *
+ * @base - BAM virtual base address.
+ *
+ * @pipe - pipe index
+ *
+ * @param - bam pipe parameters.
+ *
+ * @ee - BAM execution environment index
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int bam_pipe_init(void *base, u32 pipe, struct bam_pipe_parameters *param,
+					u32 ee);
+
+/**
+ * Reset the BAM pipe
+ *
+ * This function resets the BAM pipe.
+ *
+ * @base - BAM virtual base address.
+ *
+ * @pipe - pipe index
+ *
+ * @ee - BAM execution environment index
+ *
+ */
+void bam_pipe_exit(void *base, u32 pipe, u32 ee);
+
+/**
+ * Enable a BAM pipe
+ *
+ * This function enables a BAM pipe.
+ *
+ * @base - BAM virtual base address.
+ *
+ * @pipe - pipe index
+ *
+ */
+void bam_pipe_enable(void *base, u32 pipe);
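+
+/*
+ * Example (illustrative sketch of the expected call order): bring up a BAM
+ * and one system-mode consumer pipe. All numeric values (EE 0, summing
+ * threshold, pipe index, FIFO size) are placeholders; a real client takes
+ * them from its platform data.
+ */
+static inline int bam_bringup_example(void *base, phys_addr_t desc_base)
+{
+	struct bam_pipe_parameters params = {
+		.dir = BAM_PIPE_CONSUMER,
+		.mode = BAM_PIPE_MODE_SYSTEM,
+		.desc_base = desc_base,
+		.desc_size = 0x800,	/* placeholder FIFO size */
+		.ee = 0,
+	};
+	u32 version, num_pipes;
+	int rc;
+
+	rc = bam_init(base, 0, 0x10, 0, &version, &num_pipes, 0);
+	if (rc)
+		return rc;
+
+	rc = bam_pipe_init(base, 0, &params, 0);
+	if (rc)
+		return rc;
+
+	bam_pipe_enable(base, 0);
+
+	return 0;
+}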
+
+/**
+ * Disable a BAM pipe
+ *
+ * This function disables a BAM pipe.
+ *
+ * @base - BAM virtual base address.
+ *
+ * @pipe - pipe index
+ *
+ */
+void bam_pipe_disable(void *base, u32 pipe);
+
+/**
+ * Get a BAM pipe enable state
+ *
+ * This function determines if a BAM pipe is enabled.
+ *
+ * @base - BAM virtual base address.
+ *
+ * @pipe - pipe index
+ *
+ * @return true if enabled, false if disabled
+ *
+ */
+int bam_pipe_is_enabled(void *base, u32 pipe);
+
+/**
+ * Configure interrupt for a BAM pipe
+ *
+ * This function configures the interrupt for a BAM pipe.
+ *
+ * @base - BAM virtual base address.
+ *
+ * @pipe - pipe index
+ *
+ * @irq_en - enable or disable interrupt
+ *
+ * @src_mask - interrupt source mask, set regardless of whether
+ *    interrupt is disabled
+ *
+ * @ee - BAM execution environment index
+ *
+ */
+void bam_pipe_set_irq(void *base, u32 pipe, enum bam_enable irq_en,
+		      u32 src_mask, u32 ee);
+
+/**
+ * Configure a BAM pipe for satellite MTI use
+ *
+ * This function configures a BAM pipe for satellite MTI use.
+ *
+ * @base - BAM virtual base address.
+ *
+ * @pipe - pipe index
+ *
+ * @irq_gen_addr - physical address written to generate MTI
+ *
+ * @ee - BAM execution environment index
+ *
+ */
+void bam_pipe_satellite_mti(void *base, u32 pipe, u32 irq_gen_addr, u32 ee);
+
+/**
+ * Configure MTI for a BAM pipe
+ *
+ * This function configures the message-triggered interrupt (MTI)
+ * for a BAM pipe.
+ *
+ * @base - BAM virtual base address.
+ *
+ * @pipe - pipe index
+ *
+ * @irq_en - enable or disable interrupt
+ *
+ * @src_mask - interrupt source mask, set regardless of whether
+ *    interrupt is disabled
+ *
+ * @irq_gen_addr - physical address written to generate MTI
+ *
+ */
+void bam_pipe_set_mti(void *base, u32 pipe, enum bam_enable irq_en,
+		      u32 src_mask, u32 irq_gen_addr);
+
+/**
+ * Get and Clear BAM pipe IRQ status
+ *
+ * This function gets and clears BAM pipe IRQ status.
+ *
+ * @base - BAM virtual base address.
+ *
+ * @pipe - pipe index
+ *
+ * @return IRQ status
+ *
+ */
+u32 bam_pipe_get_and_clear_irq_status(void *base, u32 pipe);
+
+/**
+ * Set write offset for a BAM pipe
+ *
+ * This function sets the write offset for a BAM pipe.  This is
+ *    the offset that is maintained by software in system mode.
+ *
+ * @base - BAM virtual base address.
+ *
+ * @pipe - pipe index
+ *
+ * @next_write - descriptor FIFO write offset
+ *
+ */
+void bam_pipe_set_desc_write_offset(void *base, u32 pipe, u32 next_write);
+
+/**
+ * Get write offset for a BAM pipe
+ *
+ * This function gets the write offset for a BAM pipe.  This is
+ *    the offset that is maintained by the pipe's peer pipe or by software.
+ *
+ * @base - BAM virtual base address.
+ *
+ * @pipe - pipe index
+ *
+ * @return descriptor FIFO write offset
+ *
+ */
+u32 bam_pipe_get_desc_write_offset(void *base, u32 pipe);
+
+/**
+ * Get read offset for a BAM pipe
+ *
+ * This function gets the read offset for a BAM pipe.  This is
+ *    the offset that is maintained by the pipe in system mode.
+ *
+ * @base - BAM virtual base address.
+ *
+ * @pipe - pipe index
+ *
+ * @return descriptor FIFO read offset
+ *
+ */
+u32 bam_pipe_get_desc_read_offset(void *base, u32 pipe);
+
+/**
+ * Configure inactivity timer count for a BAM pipe
+ *
+ * This function configures the inactivity timer count for a BAM pipe.
+ *
+ * @base - BAM virtual base address.
+ *
+ * @pipe - pipe index
+ *
+ * @mode - timer operating mode
+ *
+ * @timeout_count - timeout count
+ *
+ */
+void bam_pipe_timer_config(void *base, u32 pipe,
+			   enum bam_pipe_timer_mode mode,
+			   u32 timeout_count);
+
+/**
+ * Reset inactivity timer for a BAM pipe
+ *
+ * This function resets the inactivity timer count for a BAM pipe.
+ *
+ * @base - BAM virtual base address.
+ *
+ * @pipe - pipe index
+ *
+ */
+void bam_pipe_timer_reset(void *base, u32 pipe);
+
+/**
+ * Get inactivity timer count for a BAM pipe
+ *
+ * This function gets the inactivity timer count for a BAM pipe.
+ *
+ * @base - BAM virtual base address.
+ *
+ * @pipe - pipe index
+ *
+ * @return inactivity timer count
+ *
+ */
+u32 bam_pipe_timer_get_count(void *base, u32 pipe);
+
+/*
+ * bam_pipe_check_zlt - Check if the last desc is ZLT.
+ * @base:	BAM virtual address
+ * @pipe:	pipe index
+ *
+ * This function checks if the last desc in the desc FIFO is a ZLT desc.
+ *
+ * @return true if the last desc in the desc FIFO is a ZLT desc. Otherwise
+ *  return false.
+ */
+bool bam_pipe_check_zlt(void *base, u32 pipe);
+
+/*
+ * bam_pipe_check_pipe_empty - Check if desc FIFO is empty.
+ * @base:	BAM virtual address
+ * @pipe:	pipe index
+ *
+ * This function checks if the desc FIFO of this pipe is empty.
+ *
+ * @return true if desc FIFO is empty. Otherwise return false.
+ */
+bool bam_pipe_check_pipe_empty(void *base, u32 pipe);
+#endif				/* _BAM_H_ */
diff --git a/drivers/platform/msm/sps/sps.c b/drivers/platform/msm/sps/sps.c
new file mode 100644
index 0000000..907c94e8
--- /dev/null
+++ b/drivers/platform/msm/sps/sps.c
@@ -0,0 +1,3064 @@
+/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/* Smart-Peripheral-Switch (SPS) Module. */
+
+#include <linux/types.h>	/* u32 */
+#include <linux/kernel.h>	/* pr_info() */
+#include <linux/module.h>	/* module_init() */
+#include <linux/slab.h>		/* kzalloc() */
+#include <linux/mutex.h>	/* mutex */
+#include <linux/device.h>	/* device */
+#include <linux/fs.h>		/* alloc_chrdev_region() */
+#include <linux/list.h>		/* list_head */
+#include <linux/memory.h>	/* memset */
+#include <linux/io.h>		/* ioremap() */
+#include <linux/clk.h>		/* clk_enable() */
+#include <linux/platform_device.h>	/* platform_get_resource_byname() */
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+
+#include "sps_bam.h"
+#include "spsi.h"
+#include "sps_core.h"
+
+#define SPS_DRV_NAME "msm_sps"	/* must match the platform_device name */
+
+/**
+ *  SPS driver state
+ */
+struct sps_drv *sps;
+
+u32 d_type;
+bool enhd_pipe;
+bool imem;
+enum sps_bam_type bam_type;
+enum sps_bam_type bam_types[] = {SPS_BAM_LEGACY, SPS_BAM_NDP, SPS_BAM_NDP_4K};
+
+static void sps_device_de_init(void);
+
+#ifdef CONFIG_DEBUG_FS
+u8 debugfs_record_enabled;
+u8 logging_option;
+u8 debug_level_option;
+u8 print_limit_option;
+u8 reg_dump_option;
+u32 testbus_sel;
+u32 bam_pipe_sel;
+u32 desc_option;
+/*
+ * Specifies the log level (0 through 3) so that logging granularity can be
+ * tuned to serve all BAM use cases.
+ */
+u32 log_level_sel;
+
+static char *debugfs_buf;
+static u32 debugfs_buf_size;
+static u32 debugfs_buf_used;
+static int wraparound;
+
+struct dentry *dent;
+struct dentry *dfile_info;
+struct dentry *dfile_logging_option;
+struct dentry *dfile_debug_level_option;
+struct dentry *dfile_print_limit_option;
+struct dentry *dfile_reg_dump_option;
+struct dentry *dfile_testbus_sel;
+struct dentry *dfile_bam_pipe_sel;
+struct dentry *dfile_desc_option;
+struct dentry *dfile_bam_addr;
+struct dentry *dfile_log_level_sel;
+
+static struct sps_bam *phy2bam(phys_addr_t phys_addr);
+
+/* record debug info for debugfs */
+void sps_debugfs_record(const char *msg)
+{
+	if (debugfs_record_enabled) {
+		if (debugfs_buf_used + MAX_MSG_LEN >= debugfs_buf_size) {
+			debugfs_buf_used = 0;
+			wraparound = true;
+		}
+		debugfs_buf_used += scnprintf(debugfs_buf + debugfs_buf_used,
+				debugfs_buf_size - debugfs_buf_used,
+				"%s", msg);
+
+		if (wraparound)
+			scnprintf(debugfs_buf + debugfs_buf_used,
+					debugfs_buf_size - debugfs_buf_used,
+					"\n**** end line of sps log ****\n\n");
+	}
+}
+
+/* read the recorded debug info to userspace */
+static ssize_t sps_read_info(struct file *file, char __user *ubuf,
+		size_t count, loff_t *ppos)
+{
+	int ret = 0;
+	int size;
+
+	if (debugfs_record_enabled) {
+		if (wraparound)
+			size = debugfs_buf_size - MAX_MSG_LEN;
+		else
+			size = debugfs_buf_used;
+
+		ret = simple_read_from_buffer(ubuf, count, ppos,
+				debugfs_buf, size);
+	}
+
+	return ret;
+}
+
+/*
+ * set the buffer size (in KB) for debug info
+ */
+static ssize_t sps_set_info(struct file *file, const char __user *buf,
+				 size_t count, loff_t *ppos)
+{
+	unsigned long missing;
+	char str[MAX_MSG_LEN];
+	int i;
+	u32 buf_size_kb = 0;
+	u32 new_buf_size;
+	u32 size = sizeof(str) < count ? sizeof(str) : count;
+
+	memset(str, 0, sizeof(str));
+	missing = copy_from_user(str, buf, size);
+	if (missing)
+		return -EFAULT;
+
+	for (i = 0; i < sizeof(str) && (str[i] >= '0') && (str[i] <= '9'); ++i)
+		buf_size_kb = (buf_size_kb * 10) + (str[i] - '0');
+
+	pr_info("sps:debugfs: input buffer size is %dKB\n", buf_size_kb);
+
+	if ((logging_option == 0) || (logging_option == 2)) {
+		pr_info("sps:debugfs: need to first turn on recording.\n");
+		return -EFAULT;
+	}
+
+	if (buf_size_kb < 1) {
+		pr_info("sps:debugfs:buffer size should be no less than 1KB\n");
+		return -EFAULT;
+	}
+
+	if (buf_size_kb > (INT_MAX/SZ_1K)) {
+		pr_err("sps:debugfs: buffer size is too large\n");
+		return -EFAULT;
+	}
+
+	new_buf_size = buf_size_kb * SZ_1K;
+
+	if (debugfs_record_enabled) {
+		if (debugfs_buf_size == new_buf_size) {
+			/* need do nothing */
+			pr_info("sps:debugfs: input buffer size is the same as before.\n");
+			return count;
+		}
+		/* release the current buffer */
+		debugfs_record_enabled = false;
+		debugfs_buf_used = 0;
+		wraparound = false;
+		kfree(debugfs_buf);
+		debugfs_buf = NULL;
+	}
+
+	/* allocate new buffer */
+	debugfs_buf_size = new_buf_size;
+
+	debugfs_buf = kzalloc(debugfs_buf_size,	GFP_KERNEL);
+	if (!debugfs_buf) {
+		debugfs_buf_size = 0;
+		pr_err("sps:fail to allocate memory for debug_fs.\n");
+		return -ENOMEM;
+	}
+
+	debugfs_buf_used = 0;
+	wraparound = false;
+	debugfs_record_enabled = true;
+
+	return count;
+}
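+
+/*
+ * Sketch of an alternative to the digit loop above: kstrtouint() accepts
+ * the NUL-terminated decimal string that sps_set_info() builds in str[]
+ * (tolerating one trailing newline). Illustrative only; not wired in.
+ */
+static int __maybe_unused sps_parse_kb_example(const char *str, u32 *kb)
+{
+	return kstrtouint(str, 10, kb);
+}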
+
+const struct file_operations sps_info_ops = {
+	.read = sps_read_info,
+	.write = sps_set_info,
+};
+
+/* return the current logging option to userspace */
+static ssize_t sps_read_logging_option(struct file *file, char __user *ubuf,
+		size_t count, loff_t *ppos)
+{
+	char value[MAX_MSG_LEN];
+	int nbytes;
+
+	nbytes = snprintf(value, MAX_MSG_LEN, "%d\n", logging_option);
+
+	return simple_read_from_buffer(ubuf, count, ppos, value, nbytes);
+}
+
+/*
+ * set the logging option
+ */
+static ssize_t sps_set_logging_option(struct file *file, const char __user *buf,
+				 size_t count, loff_t *ppos)
+{
+	unsigned long missing;
+	char str[MAX_MSG_LEN];
+	int i;
+	u8 option = 0;
+	u32 size = sizeof(str) < count ? sizeof(str) : count;
+
+	memset(str, 0, sizeof(str));
+	missing = copy_from_user(str, buf, size);
+	if (missing)
+		return -EFAULT;
+
+	for (i = 0; i < sizeof(str) && (str[i] >= '0') && (str[i] <= '9'); ++i)
+		option = (option * 10) + (str[i] - '0');
+
+	pr_info("sps:debugfs: try to change logging option to %d\n", option);
+
+	if (option > 3) {
+		pr_err("sps:debugfs: invalid logging option:%d\n", option);
+		return count;
+	}
+
+	if (((option == 0) || (option == 2)) &&
+		((logging_option == 1) || (logging_option == 3))) {
+		debugfs_record_enabled = false;
+		kfree(debugfs_buf);
+		debugfs_buf = NULL;
+		debugfs_buf_used = 0;
+		debugfs_buf_size = 0;
+		wraparound = false;
+	}
+
+	logging_option = option;
+
+	return count;
+}
+
+const struct file_operations sps_logging_option_ops = {
+	.read = sps_read_logging_option,
+	.write = sps_set_logging_option,
+};
+
+/*
+ * input the BAM physical address and dump the registers selected by
+ * reg_dump_option
+ */
+static ssize_t sps_set_bam_addr(struct file *file, const char __user *buf,
+				 size_t count, loff_t *ppos)
+{
+	unsigned long missing;
+	char str[MAX_MSG_LEN];
+	u32 i;
+	u32 bam_addr = 0;
+	struct sps_bam *bam;
+	u32 num_pipes = 0;
+	void *vir_addr;
+	u32 size = sizeof(str) < count ? sizeof(str) : count;
+
+	memset(str, 0, sizeof(str));
+	missing = copy_from_user(str, buf, size);
+	if (missing)
+		return -EFAULT;
+
+	for (i = 0; i < sizeof(str) && (str[i] >= '0') && (str[i] <= '9'); ++i)
+		bam_addr = (bam_addr * 10) + (str[i] - '0');
+
+	pr_info("sps:debugfs:input BAM physical address:0x%x\n", bam_addr);
+
+	bam = phy2bam(bam_addr);
+
+	if (bam == NULL) {
+		pr_err("sps:debugfs:BAM 0x%x is not registered.", bam_addr);
+		return count;
+	}
+	vir_addr = &bam->base;
+	num_pipes = bam->props.num_pipes;
+	if (log_level_sel <= SPS_IPC_MAX_LOGLEVEL)
+		bam->ipc_loglevel = log_level_sel;
+
+	switch (reg_dump_option) {
+	case 1: /* output all registers of this BAM */
+		print_bam_reg(bam->base);
+		for (i = 0; i < num_pipes; i++)
+			print_bam_pipe_reg(bam->base, i);
+		break;
+	case 2: /* output BAM-level registers */
+		print_bam_reg(bam->base);
+		break;
+	case 3: /* output selected BAM-level registers */
+		print_bam_selected_reg(vir_addr, bam->props.ee);
+		break;
+	case 4: /* output selected registers of all pipes */
+		for (i = 0; i < num_pipes; i++)
+			print_bam_pipe_selected_reg(vir_addr, i);
+		break;
+	case 5: /* output selected registers of selected pipes */
+		for (i = 0; i < num_pipes; i++)
+			if (bam_pipe_sel & (1UL << i))
+				print_bam_pipe_selected_reg(vir_addr, i);
+		break;
+	case 6: /* output selected registers of typical pipes */
+		print_bam_pipe_selected_reg(vir_addr, 4);
+		print_bam_pipe_selected_reg(vir_addr, 5);
+		break;
+	case 7: /* output desc FIFO of all pipes */
+		for (i = 0; i < num_pipes; i++)
+			print_bam_pipe_desc_fifo(vir_addr, i, 0);
+		break;
+	case 8: /* output desc FIFO of selected pipes */
+		for (i = 0; i < num_pipes; i++)
+			if (bam_pipe_sel & (1UL << i))
+				print_bam_pipe_desc_fifo(vir_addr, i, 0);
+		break;
+	case 9: /* output desc FIFO of typical pipes */
+		print_bam_pipe_desc_fifo(vir_addr, 4, 0);
+		print_bam_pipe_desc_fifo(vir_addr, 5, 0);
+		break;
+	case 10: /* output selected registers and desc FIFO of all pipes */
+		for (i = 0; i < num_pipes; i++) {
+			print_bam_pipe_selected_reg(vir_addr, i);
+			print_bam_pipe_desc_fifo(vir_addr, i, 0);
+		}
+		break;
+	case 11: /* output selected registers and desc FIFO of selected pipes */
+		for (i = 0; i < num_pipes; i++)
+			if (bam_pipe_sel & (1UL << i)) {
+				print_bam_pipe_selected_reg(vir_addr, i);
+				print_bam_pipe_desc_fifo(vir_addr, i, 0);
+			}
+		break;
+	case 12: /* output selected registers and desc FIFO of typical pipes */
+		print_bam_pipe_selected_reg(vir_addr, 4);
+		print_bam_pipe_desc_fifo(vir_addr, 4, 0);
+		print_bam_pipe_selected_reg(vir_addr, 5);
+		print_bam_pipe_desc_fifo(vir_addr, 5, 0);
+		break;
+	case 13: /* output BAM_TEST_BUS_REG */
+		if (testbus_sel)
+			print_bam_test_bus_reg(vir_addr, testbus_sel);
+		else {
+			pr_info("sps:output TEST_BUS_REG for all TEST_BUS_SEL");
+			print_bam_test_bus_reg(vir_addr, testbus_sel);
+		}
+		break;
+	case 14: /* output partial desc FIFO of selected pipes */
+		if (desc_option == 0)
+			desc_option = 1;
+		for (i = 0; i < num_pipes; i++)
+			if (bam_pipe_sel & (1UL << i))
+				print_bam_pipe_desc_fifo(vir_addr, i,
+							desc_option);
+		break;
+	case 15: /* output partial data blocks of descriptors */
+		for (i = 0; i < num_pipes; i++)
+			if (bam_pipe_sel & (1UL << i))
+				print_bam_pipe_desc_fifo(vir_addr, i, 100);
+		break;
+	case 16: /* output all registers of selected pipes */
+		for (i = 0; i < num_pipes; i++)
+			if (bam_pipe_sel & (1UL << i))
+				print_bam_pipe_reg(bam->base, i);
+		break;
+	case 91: /*
+		  * output testbus register, BAM global registers
+		  * and registers of all pipes
+		  */
+		print_bam_test_bus_reg(vir_addr, testbus_sel);
+		print_bam_selected_reg(vir_addr, bam->props.ee);
+		for (i = 0; i < num_pipes; i++)
+			print_bam_pipe_selected_reg(vir_addr, i);
+		break;
+	case 92: /*
+		  * output testbus register, BAM global registers
+		  * and registers of selected pipes
+		  */
+		print_bam_test_bus_reg(vir_addr, testbus_sel);
+		print_bam_selected_reg(vir_addr, bam->props.ee);
+		for (i = 0; i < num_pipes; i++)
+			if (bam_pipe_sel & (1UL << i))
+				print_bam_pipe_selected_reg(vir_addr, i);
+		break;
+	case 93: /*
+		  * output registers and partial desc FIFOs
+		  * of selected pipes: format 1
+		  */
+		if (desc_option == 0)
+			desc_option = 1;
+		print_bam_test_bus_reg(vir_addr, testbus_sel);
+		print_bam_selected_reg(vir_addr, bam->props.ee);
+		for (i = 0; i < num_pipes; i++)
+			if (bam_pipe_sel & (1UL << i))
+				print_bam_pipe_selected_reg(vir_addr, i);
+		for (i = 0; i < num_pipes; i++)
+			if (bam_pipe_sel & (1UL << i))
+				print_bam_pipe_desc_fifo(vir_addr, i,
+							desc_option);
+		break;
+	case 94: /*
+		  * output registers and partial desc FIFOs
+		  * of selected pipes: format 2
+		  */
+		if (desc_option == 0)
+			desc_option = 1;
+		print_bam_test_bus_reg(vir_addr, testbus_sel);
+		print_bam_selected_reg(vir_addr, bam->props.ee);
+		for (i = 0; i < num_pipes; i++)
+			if (bam_pipe_sel & (1UL << i)) {
+				print_bam_pipe_selected_reg(vir_addr, i);
+				print_bam_pipe_desc_fifo(vir_addr, i,
+							desc_option);
+			}
+		break;
+	case 95: /*
+		  * output registers and desc FIFOs
+		  * of selected pipes: format 1
+		  */
+		print_bam_test_bus_reg(vir_addr, testbus_sel);
+		print_bam_selected_reg(vir_addr, bam->props.ee);
+		for (i = 0; i < num_pipes; i++)
+			if (bam_pipe_sel & (1UL << i))
+				print_bam_pipe_selected_reg(vir_addr, i);
+		for (i = 0; i < num_pipes; i++)
+			if (bam_pipe_sel & (1UL << i))
+				print_bam_pipe_desc_fifo(vir_addr, i, 0);
+		break;
+	case 96: /*
+		  * output registers and desc FIFOs
+		  * of selected pipes: format 2
+		  */
+		print_bam_test_bus_reg(vir_addr, testbus_sel);
+		print_bam_selected_reg(vir_addr, bam->props.ee);
+		for (i = 0; i < num_pipes; i++)
+			if (bam_pipe_sel & (1UL << i)) {
+				print_bam_pipe_selected_reg(vir_addr, i);
+				print_bam_pipe_desc_fifo(vir_addr, i, 0);
+			}
+		break;
+	case 97: /*
+		  * output registers, desc FIFOs and partial data blocks
+		  * of selected pipes: format 1
+		  */
+		print_bam_test_bus_reg(vir_addr, testbus_sel);
+		print_bam_selected_reg(vir_addr, bam->props.ee);
+		for (i = 0; i < num_pipes; i++)
+			if (bam_pipe_sel & (1UL << i))
+				print_bam_pipe_selected_reg(vir_addr, i);
+		for (i = 0; i < num_pipes; i++)
+			if (bam_pipe_sel & (1UL << i))
+				print_bam_pipe_desc_fifo(vir_addr, i, 0);
+		for (i = 0; i < num_pipes; i++)
+			if (bam_pipe_sel & (1UL << i))
+				print_bam_pipe_desc_fifo(vir_addr, i, 100);
+		break;
+	case 98: /*
+		  * output registers, desc FIFOs and partial data blocks
+		  * of selected pipes: format 2
+		  */
+		print_bam_test_bus_reg(vir_addr, testbus_sel);
+		print_bam_selected_reg(vir_addr, bam->props.ee);
+		for (i = 0; i < num_pipes; i++)
+			if (bam_pipe_sel & (1UL << i)) {
+				print_bam_pipe_selected_reg(vir_addr, i);
+				print_bam_pipe_desc_fifo(vir_addr, i, 0);
+				print_bam_pipe_desc_fifo(vir_addr, i, 100);
+			}
+		break;
+	case 99: /* output all registers, desc FIFOs and partial data blocks */
+		print_bam_test_bus_reg(vir_addr, testbus_sel);
+		print_bam_reg(bam->base);
+		for (i = 0; i < num_pipes; i++)
+			print_bam_pipe_reg(bam->base, i);
+		print_bam_selected_reg(vir_addr, bam->props.ee);
+		for (i = 0; i < num_pipes; i++)
+			print_bam_pipe_selected_reg(vir_addr, i);
+		for (i = 0; i < num_pipes; i++)
+			print_bam_pipe_desc_fifo(vir_addr, i, 0);
+		for (i = 0; i < num_pipes; i++)
+			print_bam_pipe_desc_fifo(vir_addr, i, 100);
+		break;
+	default:
+		pr_info("sps:no dump option is chosen yet.");
+	}
+
+	return count;
+}
+
+const struct file_operations sps_bam_addr_ops = {
+	.write = sps_set_bam_addr,
+};
+
+static void sps_debugfs_init(void)
+{
+	debugfs_record_enabled = false;
+	logging_option = 0;
+	debug_level_option = 0;
+	print_limit_option = 0;
+	reg_dump_option = 0;
+	testbus_sel = 0;
+	bam_pipe_sel = 0;
+	desc_option = 0;
+	debugfs_buf_size = 0;
+	debugfs_buf_used = 0;
+	wraparound = false;
+	log_level_sel = SPS_IPC_MAX_LOGLEVEL + 1;
+
+	dent = debugfs_create_dir("sps", 0);
+	if (!dent || IS_ERR(dent)) {
+		pr_err("sps:fail to create the folder for debug_fs.\n");
+		return;
+	}
+
+	dfile_info = debugfs_create_file("info", 0664, dent, 0,
+			&sps_info_ops);
+	if (!dfile_info || IS_ERR(dfile_info)) {
+		pr_err("sps:fail to create the file for debug_fs info.\n");
+		goto info_err;
+	}
+
+	dfile_logging_option = debugfs_create_file("logging_option", 0664,
+			dent, 0, &sps_logging_option_ops);
+	if (!dfile_logging_option || IS_ERR(dfile_logging_option)) {
+		pr_err("sps:fail to create debug_fs for logging_option.\n");
+		goto logging_option_err;
+	}
+
+	dfile_debug_level_option = debugfs_create_u8("debug_level_option",
+					0664, dent, &debug_level_option);
+	if (!dfile_debug_level_option || IS_ERR(dfile_debug_level_option)) {
+		pr_err("sps:fail to create debug_fs for debug_level_option.\n");
+		goto debug_level_option_err;
+	}
+
+	dfile_print_limit_option = debugfs_create_u8("print_limit_option",
+					0664, dent, &print_limit_option);
+	if (!dfile_print_limit_option || IS_ERR(dfile_print_limit_option)) {
+		pr_err("sps:fail to create debug_fs for print_limit_option.\n");
+		goto print_limit_option_err;
+	}
+
+	dfile_reg_dump_option = debugfs_create_u8("reg_dump_option", 0664,
+						dent, &reg_dump_option);
+	if (!dfile_reg_dump_option || IS_ERR(dfile_reg_dump_option)) {
+		pr_err("sps:fail to create debug_fs for reg_dump_option.\n");
+		goto reg_dump_option_err;
+	}
+
+	dfile_testbus_sel = debugfs_create_u32("testbus_sel", 0664,
+						dent, &testbus_sel);
+	if (!dfile_testbus_sel || IS_ERR(dfile_testbus_sel)) {
+		pr_err("sps:fail to create debug_fs file for testbus_sel.\n");
+		goto testbus_sel_err;
+	}
+
+	dfile_bam_pipe_sel = debugfs_create_u32("bam_pipe_sel", 0664,
+						dent, &bam_pipe_sel);
+	if (!dfile_bam_pipe_sel || IS_ERR(dfile_bam_pipe_sel)) {
+		pr_err("sps:fail to create debug_fs file for bam_pipe_sel.\n");
+		goto bam_pipe_sel_err;
+	}
+
+	dfile_desc_option = debugfs_create_u32("desc_option", 0664,
+						dent, &desc_option);
+	if (!dfile_desc_option || IS_ERR(dfile_desc_option)) {
+		pr_err("sps:fail to create debug_fs file for desc_option.\n");
+		goto desc_option_err;
+	}
+
+	dfile_bam_addr = debugfs_create_file("bam_addr", 0664,
+			dent, 0, &sps_bam_addr_ops);
+	if (!dfile_bam_addr || IS_ERR(dfile_bam_addr)) {
+		pr_err("sps:fail to create the file for debug_fs bam_addr.\n");
+		goto bam_addr_err;
+	}
+
+	dfile_log_level_sel = debugfs_create_u32("log_level_sel", 0664,
+						dent, &log_level_sel);
+	if (!dfile_log_level_sel || IS_ERR(dfile_log_level_sel)) {
+		pr_err("sps:fail to create debug_fs file for log_level_sel.\n");
+		goto bam_log_level_err;
+	}
+
+	return;
+
+bam_log_level_err:
+	debugfs_remove(dfile_bam_addr);
+bam_addr_err:
+	debugfs_remove(dfile_desc_option);
+desc_option_err:
+	debugfs_remove(dfile_bam_pipe_sel);
+bam_pipe_sel_err:
+	debugfs_remove(dfile_testbus_sel);
+testbus_sel_err:
+	debugfs_remove(dfile_reg_dump_option);
+reg_dump_option_err:
+	debugfs_remove(dfile_print_limit_option);
+print_limit_option_err:
+	debugfs_remove(dfile_debug_level_option);
+debug_level_option_err:
+	debugfs_remove(dfile_logging_option);
+logging_option_err:
+	debugfs_remove(dfile_info);
+info_err:
+	debugfs_remove(dent);
+}
+
+static void sps_debugfs_exit(void)
+{
+	debugfs_remove(dfile_info);
+	debugfs_remove(dfile_logging_option);
+	debugfs_remove(dfile_debug_level_option);
+	debugfs_remove(dfile_print_limit_option);
+	debugfs_remove(dfile_reg_dump_option);
+	debugfs_remove(dfile_testbus_sel);
+	debugfs_remove(dfile_bam_pipe_sel);
+	debugfs_remove(dfile_desc_option);
+	debugfs_remove(dfile_bam_addr);
+	debugfs_remove(dfile_log_level_sel);
+	debugfs_remove(dent);
+	kfree(debugfs_buf);
+	debugfs_buf = NULL;
+}
+#endif
+
+/* Get the debug info of BAM registers and descriptor FIFOs */
+int sps_get_bam_debug_info(unsigned long dev, u32 option, u32 para,
+		u32 tb_sel, u32 desc_sel)
+{
+	int res = 0;
+	struct sps_bam *bam;
+	u32 i;
+	u32 num_pipes = 0;
+	void *vir_addr;
+
+	if (dev == 0) {
+		SPS_ERR(sps,
+			"sps:%s:device handle should not be 0.\n", __func__);
+		return SPS_ERROR;
+	}
+
+	if (sps == NULL || !sps->is_ready) {
+		SPS_DBG3(sps, "sps:%s:sps driver is not ready.\n", __func__);
+		return -EPROBE_DEFER;
+	}
+
+	mutex_lock(&sps->lock);
+	/* Search for the target BAM device */
+	bam = sps_h2bam(dev);
+	if (bam == NULL) {
+		pr_err("sps:Can't find any BAM with handle 0x%lx.", dev);
+		mutex_unlock(&sps->lock);
+		return SPS_ERROR;
+	}
+	mutex_unlock(&sps->lock);
+
+	vir_addr = &bam->base;
+	num_pipes = bam->props.num_pipes;
+
+	SPS_DUMP("sps:<bam-addr> dump BAM:%pa.\n", &bam->props.phys_addr);
+
+	switch (option) {
+	case 1: /* output all registers of this BAM */
+		print_bam_reg(bam->base);
+		for (i = 0; i < num_pipes; i++)
+			print_bam_pipe_reg(bam->base, i);
+		break;
+	case 2: /* output BAM-level registers */
+		print_bam_reg(bam->base);
+		break;
+	case 3: /* output selected BAM-level registers */
+		print_bam_selected_reg(vir_addr, bam->props.ee);
+		break;
+	case 4: /* output selected registers of all pipes */
+		for (i = 0; i < num_pipes; i++)
+			print_bam_pipe_selected_reg(vir_addr, i);
+		break;
+	case 5: /* output selected registers of selected pipes */
+		for (i = 0; i < num_pipes; i++)
+			if (para & (1UL << i))
+				print_bam_pipe_selected_reg(vir_addr, i);
+		break;
+	case 6: /* output selected registers of typical pipes */
+		print_bam_pipe_selected_reg(vir_addr, 4);
+		print_bam_pipe_selected_reg(vir_addr, 5);
+		break;
+	case 7: /* output desc FIFO of all pipes */
+		for (i = 0; i < num_pipes; i++)
+			print_bam_pipe_desc_fifo(vir_addr, i, 0);
+		break;
+	case 8: /* output desc FIFO of selected pipes */
+		for (i = 0; i < num_pipes; i++)
+			if (para & (1UL << i))
+				print_bam_pipe_desc_fifo(vir_addr, i, 0);
+		break;
+	case 9: /* output desc FIFO of typical pipes */
+		print_bam_pipe_desc_fifo(vir_addr, 4, 0);
+		print_bam_pipe_desc_fifo(vir_addr, 5, 0);
+		break;
+	case 10: /* output selected registers and desc FIFO of all pipes */
+		for (i = 0; i < num_pipes; i++) {
+			print_bam_pipe_selected_reg(vir_addr, i);
+			print_bam_pipe_desc_fifo(vir_addr, i, 0);
+		}
+		break;
+	case 11: /* output selected registers and desc FIFO of selected pipes */
+		for (i = 0; i < num_pipes; i++)
+			if (para & (1UL << i)) {
+				print_bam_pipe_selected_reg(vir_addr, i);
+				print_bam_pipe_desc_fifo(vir_addr, i, 0);
+			}
+		break;
+	case 12: /* output selected registers and desc FIFO of typical pipes */
+		print_bam_pipe_selected_reg(vir_addr, 4);
+		print_bam_pipe_desc_fifo(vir_addr, 4, 0);
+		print_bam_pipe_selected_reg(vir_addr, 5);
+		print_bam_pipe_desc_fifo(vir_addr, 5, 0);
+		break;
+	case 13: /* output BAM_TEST_BUS_REG */
+		if (tb_sel)
+			print_bam_test_bus_reg(vir_addr, tb_sel);
+		else
+			pr_info("sps:TEST_BUS_SEL should NOT be zero.");
+		break;
+	case 14: /* output partial desc FIFO of selected pipes */
+		if (desc_sel == 0)
+			desc_sel = 1;
+		for (i = 0; i < num_pipes; i++)
+			if (para & (1UL << i))
+				print_bam_pipe_desc_fifo(vir_addr, i,
+							desc_sel);
+		break;
+	case 15: /* output partial data blocks of descriptors */
+		for (i = 0; i < num_pipes; i++)
+			if (para & (1UL << i))
+				print_bam_pipe_desc_fifo(vir_addr, i, 100);
+		break;
+	case 16: /* output all registers of selected pipes */
+		for (i = 0; i < num_pipes; i++)
+			if (para & (1UL << i))
+				print_bam_pipe_reg(bam->base, i);
+		break;
+	case 91: /*
+		  * output testbus register, BAM global registers
+		  * and registers of all pipes
+		  */
+		print_bam_test_bus_reg(vir_addr, tb_sel);
+		print_bam_selected_reg(vir_addr, bam->props.ee);
+		for (i = 0; i < num_pipes; i++)
+			print_bam_pipe_selected_reg(vir_addr, i);
+		break;
+	case 92: /*
+		  * output testbus register, BAM global registers
+		  * and registers of selected pipes
+		  */
+		print_bam_test_bus_reg(vir_addr, tb_sel);
+		print_bam_selected_reg(vir_addr, bam->props.ee);
+		for (i = 0; i < num_pipes; i++)
+			if (para & (1UL << i))
+				print_bam_pipe_selected_reg(vir_addr, i);
+		break;
+	case 93: /*
+		  * output registers and partial desc FIFOs
+		  * of selected pipes: format 1
+		  */
+		if (desc_sel == 0)
+			desc_sel = 1;
+		print_bam_test_bus_reg(vir_addr, tb_sel);
+		print_bam_selected_reg(vir_addr, bam->props.ee);
+		for (i = 0; i < num_pipes; i++)
+			if (para & (1UL << i))
+				print_bam_pipe_selected_reg(vir_addr, i);
+		for (i = 0; i < num_pipes; i++)
+			if (para & (1UL << i))
+				print_bam_pipe_desc_fifo(vir_addr, i,
+							desc_sel);
+		break;
+	case 94: /*
+		  * output registers and partial desc FIFOs
+		  * of selected pipes: format 2
+		  */
+		if (desc_sel == 0)
+			desc_sel = 1;
+		print_bam_test_bus_reg(vir_addr, tb_sel);
+		print_bam_selected_reg(vir_addr, bam->props.ee);
+		for (i = 0; i < num_pipes; i++)
+			if (para & (1UL << i)) {
+				print_bam_pipe_selected_reg(vir_addr, i);
+				print_bam_pipe_desc_fifo(vir_addr, i,
+							desc_sel);
+			}
+		break;
+	case 95: /*
+		  * output registers and desc FIFOs
+		  * of selected pipes: format 1
+		  */
+		print_bam_test_bus_reg(vir_addr, tb_sel);
+		print_bam_selected_reg(vir_addr, bam->props.ee);
+		for (i = 0; i < num_pipes; i++)
+			if (para & (1UL << i))
+				print_bam_pipe_selected_reg(vir_addr, i);
+		for (i = 0; i < num_pipes; i++)
+			if (para & (1UL << i))
+				print_bam_pipe_desc_fifo(vir_addr, i, 0);
+		break;
+	case 96: /*
+		  * output registers and desc FIFOs
+		  * of selected pipes: format 2
+		  */
+		print_bam_test_bus_reg(vir_addr, tb_sel);
+		print_bam_selected_reg(vir_addr, bam->props.ee);
+		for (i = 0; i < num_pipes; i++)
+			if (para & (1UL << i)) {
+				print_bam_pipe_selected_reg(vir_addr, i);
+				print_bam_pipe_desc_fifo(vir_addr, i, 0);
+			}
+		break;
+	case 97: /*
+		  * output registers, desc FIFOs and partial data blocks
+		  * of selected pipes: format 1
+		  */
+		print_bam_test_bus_reg(vir_addr, tb_sel);
+		print_bam_selected_reg(vir_addr, bam->props.ee);
+		for (i = 0; i < num_pipes; i++)
+			if (para & (1UL << i))
+				print_bam_pipe_selected_reg(vir_addr, i);
+		for (i = 0; i < num_pipes; i++)
+			if (para & (1UL << i))
+				print_bam_pipe_desc_fifo(vir_addr, i, 0);
+		for (i = 0; i < num_pipes; i++)
+			if (para & (1UL << i))
+				print_bam_pipe_desc_fifo(vir_addr, i, 100);
+		break;
+	case 98: /*
+		  * output registers, desc FIFOs and partial data blocks
+		  * of selected pipes: format 2
+		  */
+		print_bam_test_bus_reg(vir_addr, tb_sel);
+		print_bam_selected_reg(vir_addr, bam->props.ee);
+		for (i = 0; i < num_pipes; i++)
+			if (para & (1UL << i)) {
+				print_bam_pipe_selected_reg(vir_addr, i);
+				print_bam_pipe_desc_fifo(vir_addr, i, 0);
+				print_bam_pipe_desc_fifo(vir_addr, i, 100);
+			}
+		break;
+	case 99: /* output all registers, desc FIFOs and partial data blocks */
+		print_bam_test_bus_reg(vir_addr, tb_sel);
+		print_bam_reg(bam->base);
+		for (i = 0; i < num_pipes; i++)
+			print_bam_pipe_reg(bam->base, i);
+		print_bam_selected_reg(vir_addr, bam->props.ee);
+		for (i = 0; i < num_pipes; i++)
+			print_bam_pipe_selected_reg(vir_addr, i);
+		for (i = 0; i < num_pipes; i++)
+			print_bam_pipe_desc_fifo(vir_addr, i, 0);
+		for (i = 0; i < num_pipes; i++)
+			print_bam_pipe_desc_fifo(vir_addr, i, 100);
+		break;
+	default:
+		pr_info("sps:no option is chosen yet.");
+	}
+
+	return res;
+}
+EXPORT_SYMBOL(sps_get_bam_debug_info);
+
+/**
+ * Initialize SPS device
+ *
+ * This function initializes the SPS device.
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+static int sps_device_init(void)
+{
+	int result;
+	bool success;
+#ifdef CONFIG_SPS_SUPPORT_BAMDMA
+	struct sps_bam_props bamdma_props = {0};
+#endif
+
+	SPS_DBG3(sps, "sps:%s.", __func__);
+
+	success = false;
+
+	result = sps_mem_init(sps->pipemem_phys_base, sps->pipemem_size);
+	if (result) {
+		SPS_ERR(sps, "sps:%s:SPS memory init failed", __func__);
+		goto exit_err;
+	}
+
+	INIT_LIST_HEAD(&sps->bams_q);
+	mutex_init(&sps->lock);
+
+	if (sps_rm_init(&sps->connection_ctrl, sps->options)) {
+		SPS_ERR(sps, "sps:%s:Fail to init SPS resource manager",
+				__func__);
+		goto exit_err;
+	}
+
+	result = sps_bam_driver_init(sps->options);
+	if (result) {
+		SPS_ERR(sps, "sps:%s:SPS BAM driver init failed", __func__);
+		goto exit_err;
+	}
+
+	/* Initialize the BAM DMA device */
+#ifdef CONFIG_SPS_SUPPORT_BAMDMA
+	bamdma_props.phys_addr = sps->bamdma_bam_phys_base;
+	bamdma_props.virt_addr = ioremap(sps->bamdma_bam_phys_base,
+					 sps->bamdma_bam_size);
+
+	if (!bamdma_props.virt_addr) {
+		SPS_ERR(sps, "sps:%s:Fail to IO map BAM-DMA BAM registers.\n",
+				__func__);
+		goto exit_err;
+	}
+
+	SPS_DBG3(sps, "sps:bamdma_bam.phys=%pa.virt=0x%p.",
+		&bamdma_props.phys_addr,
+		bamdma_props.virt_addr);
+
+	bamdma_props.periph_phys_addr =	sps->bamdma_dma_phys_base;
+	bamdma_props.periph_virt_size = sps->bamdma_dma_size;
+	bamdma_props.periph_virt_addr = ioremap(sps->bamdma_dma_phys_base,
+						sps->bamdma_dma_size);
+
+	if (!bamdma_props.periph_virt_addr) {
+		SPS_ERR(sps, "sps:%s:Fail to IO map BAM-DMA peripheral reg.\n",
+				__func__);
+		goto exit_err;
+	}
+
+	SPS_DBG3(sps, "sps:bamdma_dma.phys=%pa.virt=0x%p.",
+		&bamdma_props.periph_phys_addr,
+		bamdma_props.periph_virt_addr);
+
+	bamdma_props.irq = sps->bamdma_irq;
+
+	bamdma_props.event_threshold = 0x10;	/* Pipe event threshold */
+	bamdma_props.summing_threshold = 0x10;	/* BAM event threshold */
+
+	bamdma_props.options = SPS_BAM_OPT_BAMDMA;
+	bamdma_props.restricted_pipes =	sps->bamdma_restricted_pipes;
+
+	result = sps_dma_init(&bamdma_props);
+	if (result) {
+		SPS_ERR(sps, "sps:%s:SPS BAM DMA driver init failed", __func__);
+		goto exit_err;
+	}
+#endif /* CONFIG_SPS_SUPPORT_BAMDMA */
+
+	result = sps_map_init(NULL, sps->options);
+	if (result) {
+		SPS_ERR(sps,
+			"sps:%s:SPS connection mapping init failed", __func__);
+		goto exit_err;
+	}
+
+	success = true;
+exit_err:
+	if (!success) {
+#ifdef CONFIG_SPS_SUPPORT_BAMDMA
+		sps_device_de_init();
+#endif
+		return SPS_ERROR;
+	}
+
+	return 0;
+}
+
+/**
+ * De-initialize SPS device
+ *
+ * This function de-initializes the SPS device.
+ *
+ */
+static void sps_device_de_init(void)
+{
+	SPS_DBG3(sps, "sps:%s.", __func__);
+
+	if (sps != NULL) {
+#ifdef CONFIG_SPS_SUPPORT_BAMDMA
+		sps_dma_de_init();
+#endif
+		/* Are there any remaining BAM registrations? */
+		if (!list_empty(&sps->bams_q))
+			SPS_ERR(sps,
+				"sps:%s:BAMs are still registered", __func__);
+
+		sps_map_de_init();
+
+		kfree(sps);
+	}
+
+	sps_mem_de_init();
+}
+
+/**
+ * Initialize client state context
+ *
+ * This function initializes a client state context struct.
+ *
+ * @client - Pointer to client state context
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+static int sps_client_init(struct sps_pipe *client)
+{
+	SPS_DBG(sps, "sps:%s.", __func__);
+
+	if (client == NULL)
+		return -EINVAL;
+
+	/*
+	 * NOTE: Cannot store any state within the SPS driver because
+	 * the driver init function may not have been called yet.
+	 */
+	memset(client, 0, sizeof(*client));
+	sps_rm_config_init(&client->connect);
+
+	client->client_state = SPS_STATE_DISCONNECT;
+	client->bam = NULL;
+
+	return 0;
+}
+
+/**
+ * De-initialize client state context
+ *
+ * This function de-initializes a client state context struct.
+ *
+ * @client - Pointer to client state context
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+static int sps_client_de_init(struct sps_pipe *client)
+{
+	SPS_DBG(sps, "sps:%s.", __func__);
+
+	if (client->client_state != SPS_STATE_DISCONNECT) {
+		SPS_ERR(sps, "sps:De-init client in connected state: 0x%x",
+				   client->client_state);
+		return SPS_ERROR;
+	}
+
+	client->bam = NULL;
+	client->map = NULL;
+	memset(&client->connect, 0, sizeof(client->connect));
+
+	return 0;
+}
+
+/**
+ * Find the BAM device from the physical address
+ *
+ * This function finds a BAM device in the BAM registration list that
+ * matches the specified physical address.
+ *
+ * @phys_addr - physical address of the BAM
+ *
+ * @return - pointer to the BAM device struct, or NULL on error
+ *
+ */
+static struct sps_bam *phy2bam(phys_addr_t phys_addr)
+{
+	struct sps_bam *bam;
+
+	SPS_DBG2(sps, "sps:%s.", __func__);
+
+	list_for_each_entry(bam, &sps->bams_q, list) {
+		if (bam->props.phys_addr == phys_addr)
+			return bam;
+	}
+
+	return NULL;
+}
+
+/**
+ * Find the handle of a BAM device based on the physical address
+ *
+ * This function finds a BAM device in the BAM registration list that
+ * matches the specified physical address, and returns its handle.
+ *
+ * @phys_addr - physical address of the BAM
+ *
+ * @handle - device handle of the BAM (output)
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_phy2h(phys_addr_t phys_addr, unsigned long *handle)
+{
+	struct sps_bam *bam;
+
+	SPS_DBG2(sps, "sps:%s.", __func__);
+
+	if (sps == NULL || !sps->is_ready) {
+		SPS_DBG3(sps, "sps:%s:sps driver is not ready.\n", __func__);
+		return -EPROBE_DEFER;
+	}
+
+	if (handle == NULL) {
+		SPS_ERR(sps, "sps:%s:handle is NULL.\n", __func__);
+		return SPS_ERROR;
+	}
+
+	list_for_each_entry(bam, &sps->bams_q, list) {
+		if (bam->props.phys_addr == phys_addr) {
+			*handle = (uintptr_t) bam;
+			return 0;
+		}
+	}
+
+	SPS_ERR(sps,
+		"sps: BAM device %pa is not registered yet.\n", &phys_addr);
+
+	return -ENODEV;
+}
+EXPORT_SYMBOL(sps_phy2h);
+
+/**
+ * Setup desc/data FIFO for bam-to-bam connection
+ *
+ * @mem_buffer - Pointer to struct for allocated memory properties.
+ *
+ * @addr - address of FIFO
+ *
+ * @size - FIFO size
+ *
+ * @use_offset - use address offset instead of absolute address
+ *
+ * @return 0 on success, negative value on error
+ *
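+ * Example (a sketch; the offset and size are arbitrary): place a
+ * descriptor FIFO at offset 0 of the pipe memory:
+ *
+ *	struct sps_mem_buffer desc_fifo;
+ *
+ *	if (sps_setup_bam2bam_fifo(&desc_fifo, 0, 0x100, true))
+ *		pr_err("sps:FIFO setup failed\n");
+ *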
+ */
+int sps_setup_bam2bam_fifo(struct sps_mem_buffer *mem_buffer,
+		  u32 addr, u32 size, int use_offset)
+{
+	SPS_DBG1(sps, "sps:%s.", __func__);
+
+	if ((mem_buffer == NULL) || (size == 0)) {
+		SPS_ERR(sps, "sps:%s:invalid buffer address or size.",
+				__func__);
+		return SPS_ERROR;
+	}
+
+	if (sps == NULL || !sps->is_ready) {
+		SPS_DBG3(sps, "sps:%s:sps driver is not ready.\n", __func__);
+		return -EPROBE_DEFER;
+	}
+
+	if (use_offset) {
+		if ((addr + size) <= sps->pipemem_size)
+			mem_buffer->phys_base = sps->pipemem_phys_base + addr;
+		else {
+			SPS_ERR(sps,
+				"sps:%s:requested mem is out of pipe mem range.\n",
+				__func__);
+			return SPS_ERROR;
+		}
+	} else {
+		if (addr >= sps->pipemem_phys_base &&
+			(addr + size) <= (sps->pipemem_phys_base
+						+ sps->pipemem_size))
+			mem_buffer->phys_base = addr;
+		else {
+			SPS_ERR(sps,
+				"sps:%s:requested mem is out of pipe mem range.\n",
+				__func__);
+			return SPS_ERROR;
+		}
+	}
+
+	mem_buffer->base = spsi_get_mem_ptr(mem_buffer->phys_base);
+	mem_buffer->size = size;
+
+	memset(mem_buffer->base, 0, mem_buffer->size);
+
+	return 0;
+}
+EXPORT_SYMBOL(sps_setup_bam2bam_fifo);
+
+/**
+ * Find the BAM device from the handle
+ *
+ * This function finds a BAM device in the BAM registration list that
+ * matches the specified device handle.
+ *
+ * @h - device handle of the BAM
+ *
+ * @return - pointer to the BAM device struct, or NULL on error
+ *
+ */
+struct sps_bam *sps_h2bam(unsigned long h)
+{
+	struct sps_bam *bam;
+
+	SPS_DBG1(sps, "sps:%s: BAM handle:0x%lx.", __func__, h);
+
+	if (h == SPS_DEV_HANDLE_MEM || h == SPS_DEV_HANDLE_INVALID)
+		return NULL;
+
+	list_for_each_entry(bam, &sps->bams_q, list) {
+		if ((uintptr_t) bam == h)
+			return bam;
+	}
+
+	SPS_ERR(sps, "sps:Can't find BAM device for handle 0x%lx.", h);
+
+	return NULL;
+}
+
+/**
+ * Lock BAM device
+ *
+ * This function obtains the BAM spinlock on the client's connection.
+ *
+ * @pipe - pointer to client pipe state
+ *
+ * @return pointer to BAM device struct, or NULL on error
+ *
+ */
+static struct sps_bam *sps_bam_lock(struct sps_pipe *pipe)
+{
+	struct sps_bam *bam;
+	u32 pipe_index;
+
+	bam = pipe->bam;
+	if (bam == NULL) {
+		SPS_ERR(sps, "sps:%s:Connection is not in connected state.",
+				__func__);
+		return NULL;
+	}
+
+	spin_lock_irqsave(&bam->connection_lock, bam->irqsave_flags);
+
+	/* Verify client owns this pipe */
+	pipe_index = pipe->pipe_index;
+	if (pipe_index >= bam->props.num_pipes ||
+	    pipe != bam->pipes[pipe_index]) {
+		SPS_ERR(bam,
+			"sps:Client not owner of BAM %pa pipe: %d (max %d)",
+			&bam->props.phys_addr, pipe_index,
+			bam->props.num_pipes);
+		spin_unlock_irqrestore(&bam->connection_lock,
+						bam->irqsave_flags);
+		return NULL;
+	}
+
+	return bam;
+}
+
+/**
+ * Unlock BAM device
+ *
+ * This function releases the BAM spinlock on the client's connection.
+ *
+ * @bam - pointer to BAM device struct
+ *
+ */
+static inline void sps_bam_unlock(struct sps_bam *bam)
+{
+	spin_unlock_irqrestore(&bam->connection_lock, bam->irqsave_flags);
+}
+
+/**
+ * Connect an SPS connection end point
+ *
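+ * Example (an illustrative sketch; "pipe" is assumed to come from
+ * sps_alloc_endpoint(), "bam_hdl" from sps_register_bam_device() and
+ * "desc_fifo" from sps_alloc_mem()):
+ *
+ *	struct sps_connect conn;
+ *
+ *	sps_get_config(pipe, &conn);
+ *	conn.source = bam_hdl;
+ *	conn.destination = SPS_DEV_HANDLE_MEM;
+ *	conn.mode = SPS_MODE_SRC;
+ *	conn.options = SPS_O_EOT;
+ *	conn.desc = desc_fifo;
+ *	if (sps_connect(pipe, &conn))
+ *		pr_err("sps:connect failed\n");
+ *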
+ */
+int sps_connect(struct sps_pipe *h, struct sps_connect *connect)
+{
+	struct sps_pipe *pipe = h;
+	unsigned long dev;
+	struct sps_bam *bam;
+	int result;
+
+	if (h == NULL) {
+		SPS_ERR(sps, "sps:%s:pipe is NULL.\n", __func__);
+		return SPS_ERROR;
+	} else if (connect == NULL) {
+		SPS_ERR(sps, "sps:%s:connection is NULL.\n", __func__);
+		return SPS_ERROR;
+	}
+
+	if (sps == NULL)
+		return -ENODEV;
+
+	if (!sps->is_ready) {
+		SPS_ERR(sps, "sps:%s:sps driver is not ready.\n", __func__);
+		return -EAGAIN;
+	}
+
+	if ((connect->lock_group != SPSRM_CLEAR)
+		&& (connect->lock_group > BAM_MAX_P_LOCK_GROUP_NUM)) {
+		SPS_ERR(sps,
+			"sps:%s:The value of pipe lock group is invalid.\n",
+			__func__);
+		return SPS_ERROR;
+	}
+
+	mutex_lock(&sps->lock);
+	/*
+	 * Must lock the BAM device at the top level function, so must
+	 * determine which BAM is the target for the connection
+	 */
+	if (connect->mode == SPS_MODE_SRC)
+		dev = connect->source;
+	else
+		dev = connect->destination;
+
+	bam = sps_h2bam(dev);
+	if (bam == NULL) {
+		SPS_ERR(sps, "sps:Invalid BAM device handle: 0x%lx", dev);
+		result = SPS_ERROR;
+		goto exit_err;
+	}
+
+	mutex_lock(&bam->lock);
+	SPS_DBG2(bam, "sps:sps_connect: bam %pa src 0x%lx dest 0x%lx mode %s",
+			BAM_ID(bam),
+			connect->source,
+			connect->destination,
+			connect->mode == SPS_MODE_SRC ? "SRC" : "DEST");
+
+	/* Allocate resources for the specified connection */
+	pipe->connect = *connect;
+	result = sps_rm_state_change(pipe, SPS_STATE_ALLOCATE);
+	if (result) {
+		mutex_unlock(&bam->lock);
+		goto exit_err;
+	}
+
+	/* Configure the connection */
+	result = sps_rm_state_change(pipe, SPS_STATE_CONNECT);
+	mutex_unlock(&bam->lock);
+	if (result) {
+		sps_disconnect(h);
+		goto exit_err;
+	}
+
+exit_err:
+	mutex_unlock(&sps->lock);
+
+	return result;
+}
+EXPORT_SYMBOL(sps_connect);
+
+/**
+ * Disconnect an SPS connection end point
+ *
+ * This function disconnects an SPS connection end point.
+ * The SPS hardware associated with that end point will be disabled.
+ * For a connection involving system memory (SPS_DEV_HANDLE_MEM), all
+ * connection resources are deallocated.  For a peripheral-to-peripheral
+ * connection, the resources associated with the connection will not be
+ * deallocated until both end points are closed.
+ *
+ * The client must call sps_connect() for the handle before calling
+ * this function.
+ *
+ * @h - client context for SPS connection end point
+ *
+ * @return 0 on success, negative value on error
+ *
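+ * Example (a sketch, continuing the sps_connect() example above):
+ * teardown mirrors the setup order:
+ *
+ *	sps_disconnect(pipe);
+ *	sps_free_mem(pipe, &desc_fifo);
+ *	sps_free_endpoint(pipe);
+ *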
+ */
+int sps_disconnect(struct sps_pipe *h)
+{
+	struct sps_pipe *pipe = h;
+	struct sps_pipe *check;
+	struct sps_bam *bam;
+	int result;
+
+	if (pipe == NULL) {
+		SPS_ERR(sps, "sps:%s:Invalid pipe.", __func__);
+		return SPS_ERROR;
+	}
+
+	bam = pipe->bam;
+	if (bam == NULL) {
+		SPS_ERR(sps,
+			"sps:%s:BAM device of this pipe is NULL.", __func__);
+		return SPS_ERROR;
+	}
+
+	SPS_DBG2(bam,
+		"sps:sps_disconnect: bam %pa src 0x%lx dest 0x%lx mode %s",
+		BAM_ID(bam),
+		pipe->connect.source,
+		pipe->connect.destination,
+		pipe->connect.mode == SPS_MODE_SRC ? "SRC" : "DEST");
+
+	result = SPS_ERROR;
+	/* Cross-check client with map table */
+	if (pipe->connect.mode == SPS_MODE_SRC)
+		check = pipe->map->client_src;
+	else
+		check = pipe->map->client_dest;
+
+	if (check != pipe) {
+		SPS_ERR(sps, "sps:%s:Client context is corrupt", __func__);
+		goto exit_err;
+	}
+
+	/* Disconnect the BAM pipe */
+	mutex_lock(&bam->lock);
+	result = sps_rm_state_change(pipe, SPS_STATE_DISCONNECT);
+	mutex_unlock(&bam->lock);
+	if (result)
+		goto exit_err;
+
+	sps_rm_config_init(&pipe->connect);
+	result = 0;
+
+exit_err:
+
+	return result;
+}
+EXPORT_SYMBOL(sps_disconnect);
+
+/**
+ * Register an event object for an SPS connection end point
+ *
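+ * Example (a sketch; "my_eot_cb" and "priv" are hypothetical client
+ * names):
+ *
+ *	struct sps_register_event ev = {0};
+ *
+ *	ev.options = SPS_O_EOT;
+ *	ev.mode = SPS_TRIGGER_CALLBACK;
+ *	ev.callback = my_eot_cb;
+ *	ev.user = priv;
+ *	sps_register_event(pipe, &ev);
+ *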
+ */
+int sps_register_event(struct sps_pipe *h, struct sps_register_event *reg)
+{
+	struct sps_pipe *pipe = h;
+	struct sps_bam *bam;
+	int result;
+
+	if (h == NULL) {
+		SPS_ERR(sps, "sps:%s:pipe is NULL.\n", __func__);
+		return SPS_ERROR;
+	} else if (reg == NULL) {
+		SPS_ERR(sps, "sps:%s:registered event is NULL.\n", __func__);
+		return SPS_ERROR;
+	}
+
+	if (sps == NULL)
+		return -ENODEV;
+
+	if (!sps->is_ready) {
+		SPS_ERR(sps, "sps:%s:sps driver not ready.\n", __func__);
+		return -EAGAIN;
+	}
+
+	bam = sps_bam_lock(pipe);
+	if (bam == NULL)
+		return SPS_ERROR;
+
+	SPS_DBG2(bam, "sps:%s; events:%d.\n", __func__, reg->options);
+
+	result = sps_bam_pipe_reg_event(bam, pipe->pipe_index, reg);
+	sps_bam_unlock(bam);
+	if (result)
+		SPS_ERR(bam,
+			"sps:Fail to register event for BAM %pa pipe %d",
+			&pipe->bam->props.phys_addr, pipe->pipe_index);
+
+	return result;
+}
+EXPORT_SYMBOL(sps_register_event);
+
+/**
+ * Enable an SPS connection end point
+ *
+ */
+int sps_flow_on(struct sps_pipe *h)
+{
+	struct sps_pipe *pipe = h;
+	struct sps_bam *bam;
+	int result = 0;
+
+	if (h == NULL) {
+		SPS_ERR(sps, "sps:%s:pipe is NULL.\n", __func__);
+		return SPS_ERROR;
+	}
+
+	bam = sps_bam_lock(pipe);
+	if (bam == NULL)
+		return SPS_ERROR;
+
+	SPS_DBG2(bam, "sps:%s.\n", __func__);
+
+	bam_pipe_halt(&bam->base, pipe->pipe_index, false);
+
+	sps_bam_unlock(bam);
+
+	return result;
+}
+EXPORT_SYMBOL(sps_flow_on);
+
+/**
+ * Disable an SPS connection end point
+ *
+ */
+int sps_flow_off(struct sps_pipe *h, enum sps_flow_off mode)
+{
+	struct sps_pipe *pipe = h;
+	struct sps_bam *bam;
+	int result = 0;
+
+	if (h == NULL) {
+		SPS_ERR(sps, "sps:%s:pipe is NULL.\n", __func__);
+		return SPS_ERROR;
+	}
+
+	bam = sps_bam_lock(pipe);
+	if (bam == NULL)
+		return SPS_ERROR;
+
+	SPS_DBG2(bam, "sps:%s.\n", __func__);
+
+	bam_pipe_halt(&bam->base, pipe->pipe_index, true);
+
+	sps_bam_unlock(bam);
+
+	return result;
+}
+EXPORT_SYMBOL(sps_flow_off);
+
+/**
+ * Check if the flags on a descriptor/iovec are valid
+ *
+ * @flags - flags on a descriptor/iovec
+ *
+ * @return 0 on success, negative value on error
+ *
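+ * For example, NWD paired with EOT passes the check, while EOT
+ * combined with CMD does not:
+ *
+ *	sps_check_iovec_flags(SPS_IOVEC_FLAG_EOT | SPS_IOVEC_FLAG_NWD);
+ *		=> returns 0
+ *	sps_check_iovec_flags(SPS_IOVEC_FLAG_EOT | SPS_IOVEC_FLAG_CMD);
+ *		=> returns SPS_ERROR
+ *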
+ */
+static int sps_check_iovec_flags(u32 flags)
+{
+	if ((flags & SPS_IOVEC_FLAG_NWD) &&
+		!(flags & (SPS_IOVEC_FLAG_EOT | SPS_IOVEC_FLAG_CMD))) {
+		SPS_ERR(sps,
+			"sps:%s:NWD is only valid with EOT or CMD.\n",
+			__func__);
+		return SPS_ERROR;
+	} else if ((flags & SPS_IOVEC_FLAG_EOT) &&
+		(flags & SPS_IOVEC_FLAG_CMD)) {
+		SPS_ERR(sps,
+			"sps:%s:EOT and CMD are not allowed to coexist.\n",
+			__func__);
+		return SPS_ERROR;
+	} else if (!(flags & SPS_IOVEC_FLAG_CMD) &&
+		(flags & (SPS_IOVEC_FLAG_LOCK | SPS_IOVEC_FLAG_UNLOCK))) {
+		static char err_msg[] =
+		"pipe lock/unlock flags are only valid with Command Descriptor";
+		SPS_ERR(sps, "sps:%s.\n", err_msg);
+		return SPS_ERROR;
+	} else if ((flags & SPS_IOVEC_FLAG_LOCK) &&
+		(flags & SPS_IOVEC_FLAG_UNLOCK)) {
+		static char err_msg[] =
+		"Can't lock and unlock a pipe by the same Command Descriptor";
+		SPS_ERR(sps, "sps:%s.\n", err_msg);
+		return SPS_ERROR;
+	} else if ((flags & SPS_IOVEC_FLAG_IMME) &&
+		(flags & SPS_IOVEC_FLAG_CMD)) {
+		SPS_ERR(sps,
+			"sps:%s:Immediate and CMD are not allowed to coexist.\n",
+			__func__);
+		return SPS_ERROR;
+	} else if ((flags & SPS_IOVEC_FLAG_IMME) &&
+		(flags & SPS_IOVEC_FLAG_NWD)) {
+		SPS_ERR(sps,
+			"sps:%s:Immediate and NWD are not allowed to coexist.\n",
+			__func__);
+		return SPS_ERROR;
+	}
+
+	return 0;
+}
+
+/**
+ * Perform a DMA transfer on an SPS connection end point
+ *
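+ * Example (a sketch; "iovec_list", "iovec_list_phys", "n" and "priv"
+ * are assumed to be a caller-owned descriptor array, its physical
+ * address, the entry count and a private cookie):
+ *
+ *	struct sps_transfer xfer;
+ *
+ *	xfer.iovec = iovec_list;
+ *	xfer.iovec_phys = iovec_list_phys;
+ *	xfer.iovec_count = n;
+ *	xfer.user = priv;
+ *	sps_transfer(pipe, &xfer);
+ *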
+ */
+int sps_transfer(struct sps_pipe *h, struct sps_transfer *transfer)
+{
+	struct sps_pipe *pipe = h;
+	struct sps_bam *bam;
+	int result;
+	struct sps_iovec *iovec;
+	int i;
+
+	if (h == NULL) {
+		SPS_ERR(sps, "sps:%s:pipe is NULL.\n", __func__);
+		return SPS_ERROR;
+	} else if (transfer == NULL) {
+		SPS_ERR(sps, "sps:%s:transfer is NULL.\n", __func__);
+		return SPS_ERROR;
+	} else if (transfer->iovec == NULL) {
+		SPS_ERR(sps, "sps:%s:iovec list is NULL.\n", __func__);
+		return SPS_ERROR;
+	} else if (transfer->iovec_count == 0) {
+		SPS_ERR(sps, "sps:%s:iovec list is empty.\n", __func__);
+		return SPS_ERROR;
+	} else if (transfer->iovec_phys == 0) {
+		SPS_ERR(sps,
+			"sps:%s:iovec list address is invalid.\n", __func__);
+		return SPS_ERROR;
+	}
+
+	/* Verify content of IOVECs */
+	iovec = transfer->iovec;
+	for (i = 0; i < transfer->iovec_count; i++) {
+		u32 flags = iovec->flags;
+
+		if (iovec->size > SPS_IOVEC_MAX_SIZE) {
+			SPS_ERR(sps,
+				"sps:%s:iovec size is invalid.\n", __func__);
+			return SPS_ERROR;
+		}
+
+		if (sps_check_iovec_flags(flags))
+			return SPS_ERROR;
+
+		iovec++;
+	}
+
+	bam = sps_bam_lock(pipe);
+	if (bam == NULL)
+		return SPS_ERROR;
+
+	SPS_DBG(bam, "sps:%s.\n", __func__);
+
+	result = sps_bam_pipe_transfer(bam, pipe->pipe_index, transfer);
+
+	sps_bam_unlock(bam);
+
+	return result;
+}
+EXPORT_SYMBOL(sps_transfer);
+
+/**
+ * Perform a single DMA transfer on an SPS connection end point
+ *
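+ * Example (a sketch; "buf_phys" is assumed to be a DMA address the
+ * caller mapped): queue one buffer with end-of-transfer and interrupt
+ * flags:
+ *
+ *	sps_transfer_one(pipe, buf_phys, len, priv,
+ *			SPS_IOVEC_FLAG_EOT | SPS_IOVEC_FLAG_INT);
+ *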
+ */
+int sps_transfer_one(struct sps_pipe *h, phys_addr_t addr, u32 size,
+		     void *user, u32 flags)
+{
+	struct sps_pipe *pipe = h;
+	struct sps_bam *bam;
+	int result;
+
+	if (h == NULL) {
+		SPS_ERR(sps, "sps:%s:pipe is NULL.\n", __func__);
+		return SPS_ERROR;
+	}
+
+	if (sps_check_iovec_flags(flags))
+		return SPS_ERROR;
+
+	bam = sps_bam_lock(pipe);
+	if (bam == NULL)
+		return SPS_ERROR;
+
+	SPS_DBG(bam, "sps:%s.\n", __func__);
+
+	result = sps_bam_pipe_transfer_one(bam, pipe->pipe_index,
+				SPS_GET_LOWER_ADDR(addr), size, user,
+				DESC_FLAG_WORD(flags, addr));
+
+	sps_bam_unlock(bam);
+
+	return result;
+}
+EXPORT_SYMBOL(sps_transfer_one);
+
+/**
+ * Read event queue for an SPS connection end point
+ *
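+ * Example (a sketch, assuming SPS_EVENT_INVALID marks an empty event
+ * queue): drain the queue in polling mode:
+ *
+ *	struct sps_event_notify notify;
+ *
+ *	do {
+ *		if (sps_get_event(pipe, &notify))
+ *			break;
+ *	} while (notify.event_id != SPS_EVENT_INVALID);
+ *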
+ */
+int sps_get_event(struct sps_pipe *h, struct sps_event_notify *notify)
+{
+	struct sps_pipe *pipe = h;
+	struct sps_bam *bam;
+	int result;
+
+	if (h == NULL) {
+		SPS_ERR(sps, "sps:%s:pipe is NULL.\n", __func__);
+		return SPS_ERROR;
+	} else if (notify == NULL) {
+		SPS_ERR(sps, "sps:%s:event_notify is NULL.\n", __func__);
+		return SPS_ERROR;
+	}
+
+	bam = sps_bam_lock(pipe);
+	if (bam == NULL)
+		return SPS_ERROR;
+
+	SPS_DBG1(bam, "sps:%s.\n", __func__);
+
+	result = sps_bam_pipe_get_event(bam, pipe->pipe_index, notify);
+	sps_bam_unlock(bam);
+
+	return result;
+}
+EXPORT_SYMBOL(sps_get_event);
+
+/**
+ * Determine whether an SPS connection end point FIFO is empty
+ *
+ */
+int sps_is_pipe_empty(struct sps_pipe *h, u32 *empty)
+{
+	struct sps_pipe *pipe = h;
+	struct sps_bam *bam;
+	int result;
+
+	if (h == NULL) {
+		SPS_ERR(sps, "sps:%s:pipe is NULL.\n", __func__);
+		return SPS_ERROR;
+	} else if (empty == NULL) {
+		SPS_ERR(sps, "sps:%s:result pointer is NULL.\n", __func__);
+		return SPS_ERROR;
+	}
+
+	bam = sps_bam_lock(pipe);
+	if (bam == NULL)
+		return SPS_ERROR;
+
+	SPS_DBG1(bam, "sps:%s.\n", __func__);
+
+	result = sps_bam_pipe_is_empty(bam, pipe->pipe_index, empty);
+	sps_bam_unlock(bam);
+
+	return result;
+}
+EXPORT_SYMBOL(sps_is_pipe_empty);
+
+/**
+ * Get number of free transfer entries for an SPS connection end point
+ *
+ */
+int sps_get_free_count(struct sps_pipe *h, u32 *count)
+{
+	struct sps_pipe *pipe = h;
+	struct sps_bam *bam;
+	int result;
+
+	if (h == NULL) {
+		SPS_ERR(sps, "sps:%s:pipe is NULL.\n", __func__);
+		return SPS_ERROR;
+	} else if (count == NULL) {
+		SPS_ERR(sps, "sps:%s:result pointer is NULL.\n", __func__);
+		return SPS_ERROR;
+	}
+
+	bam = sps_bam_lock(pipe);
+	if (bam == NULL)
+		return SPS_ERROR;
+
+	SPS_DBG(bam, "sps:%s.\n", __func__);
+
+	result = sps_bam_get_free_count(bam, pipe->pipe_index, count);
+	sps_bam_unlock(bam);
+
+	return result;
+}
+EXPORT_SYMBOL(sps_get_free_count);
+
+/**
+ * Reset an SPS BAM device
+ *
+ */
+int sps_device_reset(unsigned long dev)
+{
+	struct sps_bam *bam;
+	int result;
+
+	if (dev == 0) {
+		SPS_ERR(sps,
+			"sps:%s:device handle should not be 0.\n", __func__);
+		return SPS_ERROR;
+	}
+
+	if (sps == NULL || !sps->is_ready) {
+		SPS_DBG3(sps, "sps:%s:sps driver is not ready.\n", __func__);
+		return -EPROBE_DEFER;
+	}
+
+	mutex_lock(&sps->lock);
+	/* Search for the target BAM device */
+	bam = sps_h2bam(dev);
+	if (bam == NULL) {
+		SPS_ERR(sps, "sps:Invalid BAM device handle: 0x%lx", dev);
+		result = SPS_ERROR;
+		goto exit_err;
+	}
+
+	SPS_DBG3(bam, "sps:%s.\n", __func__);
+
+	mutex_lock(&bam->lock);
+	result = sps_bam_reset(bam);
+	mutex_unlock(&bam->lock);
+	if (result) {
+		SPS_ERR(sps, "sps:Fail to reset BAM device: 0x%lx", dev);
+		goto exit_err;
+	}
+
+exit_err:
+	mutex_unlock(&sps->lock);
+
+	return result;
+}
+EXPORT_SYMBOL(sps_device_reset);
+
+/**
+ * Get the configuration parameters for an SPS connection end point
+ *
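+ * Example (a sketch): the usual pattern is read-modify-write together
+ * with sps_set_config():
+ *
+ *	struct sps_connect cfg;
+ *
+ *	sps_get_config(pipe, &cfg);
+ *	cfg.options |= SPS_O_POLL;
+ *	sps_set_config(pipe, &cfg);
+ *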
+ */
+int sps_get_config(struct sps_pipe *h, struct sps_connect *config)
+{
+	struct sps_pipe *pipe = h;
+
+	if (h == NULL) {
+		SPS_ERR(sps, "sps:%s:pipe is NULL.\n", __func__);
+		return SPS_ERROR;
+	} else if (config == NULL) {
+		SPS_ERR(sps, "sps:%s:config pointer is NULL.\n", __func__);
+		return SPS_ERROR;
+	}
+
+	if (pipe->bam == NULL)
+		SPS_DBG(sps, "sps:%s.\n", __func__);
+	else
+		SPS_DBG(pipe->bam,
+			"sps:%s; BAM: %pa; pipe index:%d; options:0x%x.\n",
+			__func__, BAM_ID(pipe->bam), pipe->pipe_index,
+			pipe->connect.options);
+
+	/* Copy current client connection state */
+	*config = pipe->connect;
+
+	return 0;
+}
+EXPORT_SYMBOL(sps_get_config);
+
+/**
+ * Set the configuration parameters for an SPS connection end point
+ *
+ */
+int sps_set_config(struct sps_pipe *h, struct sps_connect *config)
+{
+	struct sps_pipe *pipe = h;
+	struct sps_bam *bam;
+	int result;
+
+	if (h == NULL) {
+		SPS_ERR(sps, "sps:%s:pipe is NULL.\n", __func__);
+		return SPS_ERROR;
+	} else if (config == NULL) {
+		SPS_ERR(sps, "sps:%s:config pointer is NULL.\n", __func__);
+		return SPS_ERROR;
+	}
+
+	bam = sps_bam_lock(pipe);
+	if (bam == NULL) {
+		SPS_ERR(sps, "sps:%s:BAM is NULL.\n", __func__);
+		return SPS_ERROR;
+	}
+
+	SPS_DBG(bam, "sps:%s; BAM: %pa; pipe index:%d, config-options:0x%x.\n",
+		__func__, BAM_ID(bam), pipe->pipe_index, config->options);
+
+	result = sps_bam_pipe_set_params(bam, pipe->pipe_index,
+					 config->options);
+	if (result == 0)
+		pipe->connect.options = config->options;
+	sps_bam_unlock(bam);
+
+	return result;
+}
+EXPORT_SYMBOL(sps_set_config);
+
+/**
+ * Set ownership of an SPS connection end point
+ *
+ */
+int sps_set_owner(struct sps_pipe *h, enum sps_owner owner,
+		  struct sps_satellite *connect)
+{
+	struct sps_pipe *pipe = h;
+	struct sps_bam *bam;
+	int result;
+
+	if (h == NULL) {
+		SPS_ERR(sps, "sps:%s:pipe is NULL.\n", __func__);
+		return SPS_ERROR;
+	} else if (connect == NULL) {
+		SPS_ERR(sps, "sps:%s:connection is NULL.\n", __func__);
+		return SPS_ERROR;
+	}
+
+	if (owner != SPS_OWNER_REMOTE) {
+		SPS_ERR(sps, "sps:Unsupported ownership state: %d", owner);
+		return SPS_ERROR;
+	}
+
+	bam = sps_bam_lock(pipe);
+	if (bam == NULL)
+		return SPS_ERROR;
+
+	SPS_DBG(bam, "sps:%s; BAM: %pa; pipe index:%d.\n",
+		__func__, BAM_ID(bam), pipe->pipe_index);
+
+	result = sps_bam_set_satellite(bam, pipe->pipe_index);
+	if (result)
+		goto exit_err;
+
+	/* Return satellite connect info (connect was validated at entry) */
+
+	if (pipe->connect.mode == SPS_MODE_SRC) {
+		connect->dev = pipe->map->src.bam_phys;
+		connect->pipe_index = pipe->map->src.pipe_index;
+	} else {
+		connect->dev = pipe->map->dest.bam_phys;
+		connect->pipe_index = pipe->map->dest.pipe_index;
+	}
+	connect->config = SPS_CONFIG_SATELLITE;
+	connect->options = (enum sps_option) 0;
+
+exit_err:
+	sps_bam_unlock(bam);
+
+	return result;
+}
+EXPORT_SYMBOL(sps_set_owner);
+
+/**
+ * Allocate memory from the SPS Pipe-Memory.
+ *
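+ * Example (a sketch, assuming SPS_MEM_LOCAL selects the on-chip pipe
+ * memory):
+ *
+ *	struct sps_mem_buffer desc_fifo;
+ *
+ *	desc_fifo.size = 0x100;
+ *	if (sps_alloc_mem(pipe, SPS_MEM_LOCAL, &desc_fifo))
+ *		pr_err("sps:no pipe memory\n");
+ *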
+ */
+int sps_alloc_mem(struct sps_pipe *h, enum sps_mem mem,
+		  struct sps_mem_buffer *mem_buffer)
+{
+	if (sps == NULL)
+		return -ENODEV;
+
+	if (!sps->is_ready) {
+		SPS_ERR(sps, "sps:%s:sps driver is not ready.", __func__);
+		return -EAGAIN;
+	}
+
+	if (mem_buffer == NULL || mem_buffer->size == 0) {
+		SPS_ERR(sps, "sps:%s:invalid memory buffer address or size",
+				__func__);
+		return SPS_ERROR;
+	}
+
+	if (h == NULL)
+		SPS_DBG2(sps,
+			"sps:%s:allocate pipe memory before setup pipe",
+			__func__);
+	else
+		SPS_DBG2(sps,
+			"sps:allocate pipe memory for pipe %d", h->pipe_index);
+
+	mem_buffer->phys_base = sps_mem_alloc_io(mem_buffer->size);
+	if (mem_buffer->phys_base == SPS_ADDR_INVALID) {
+		SPS_ERR(sps, "sps:%s:invalid address of allocated memory",
+				__func__);
+		return SPS_ERROR;
+	}
+
+	mem_buffer->base = spsi_get_mem_ptr(mem_buffer->phys_base);
+
+	return 0;
+}
+EXPORT_SYMBOL(sps_alloc_mem);
+
+/**
+ * Free memory from the SPS Pipe-Memory.
+ *
+ */
+int sps_free_mem(struct sps_pipe *h, struct sps_mem_buffer *mem_buffer)
+{
+	SPS_DBG(sps, "sps:%s.", __func__);
+
+	if (mem_buffer == NULL || mem_buffer->phys_base == SPS_ADDR_INVALID) {
+		SPS_ERR(sps, "sps:%s:invalid memory to free", __func__);
+		return SPS_ERROR;
+	}
+
+	if (h == NULL)
+		SPS_DBG2(sps, "sps:%s:free pipe memory.", __func__);
+	else
+		SPS_DBG2(sps,
+			"sps:free pipe memory for pipe %d.", h->pipe_index);
+
+	sps_mem_free_io(mem_buffer->phys_base, mem_buffer->size);
+
+	return 0;
+}
+EXPORT_SYMBOL(sps_free_mem);
+
+/**
+ * Get the number of unused descriptors in the descriptor FIFO
+ * of a pipe
+ *
+ */
+int sps_get_unused_desc_num(struct sps_pipe *h, u32 *desc_num)
+{
+	struct sps_pipe *pipe = h;
+	struct sps_bam *bam;
+	int result;
+
+	if (h == NULL) {
+		SPS_ERR(sps, "sps:%s:pipe is NULL.\n", __func__);
+		return SPS_ERROR;
+	} else if (desc_num == NULL) {
+		SPS_ERR(sps, "sps:%s:result pointer is NULL.\n", __func__);
+		return SPS_ERROR;
+	}
+
+	bam = sps_bam_lock(pipe);
+	if (bam == NULL)
+		return SPS_ERROR;
+
+	SPS_DBG(bam, "sps:%s; BAM: %pa; pipe index:%d.\n",
+		__func__, BAM_ID(bam), pipe->pipe_index);
+
+	result = sps_bam_pipe_get_unused_desc_num(bam, pipe->pipe_index,
+						desc_num);
+
+	sps_bam_unlock(bam);
+
+	return result;
+}
+EXPORT_SYMBOL(sps_get_unused_desc_num);
+
+/**
+ * Vote for or relinquish BAM DMA clock
+ *
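+ * Example (a sketch): bracket BAM-DMA activity with a clock vote:
+ *
+ *	sps_ctrl_bam_dma_clk(true);
+ *	...perform BAM-DMA transfers...
+ *	sps_ctrl_bam_dma_clk(false);
+ *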
+ */
+int sps_ctrl_bam_dma_clk(bool clk_on)
+{
+	int ret;
+
+	if (sps == NULL || !sps->is_ready) {
+		SPS_DBG3(sps, "sps:%s:sps driver is not ready.\n", __func__);
+		return -EPROBE_DEFER;
+	}
+
+	if (clk_on) {
+		SPS_DBG1(sps, "%s", "sps:vote for bam dma clk.\n");
+		ret = clk_prepare_enable(sps->bamdma_clk);
+		if (ret) {
+			SPS_ERR(sps,
+				"sps:fail to enable bamdma_clk:ret=%d\n", ret);
+			return ret;
+		}
+	} else {
+		SPS_DBG1(sps, "%s", "sps:relinquish bam dma clk.\n");
+		clk_disable_unprepare(sps->bamdma_clk);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(sps_ctrl_bam_dma_clk);
+
+/**
+ * Register a BAM device
+ *
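+ * Example (an illustrative sketch; "res" and "irq" are assumed to come
+ * from the client's platform resources):
+ *
+ *	struct sps_bam_props props = {0};
+ *	unsigned long bam_hdl;
+ *
+ *	props.phys_addr = res->start;
+ *	props.virt_size = resource_size(res);
+ *	props.irq = irq;
+ *	props.summing_threshold = 0x10;
+ *	if (sps_register_bam_device(&props, &bam_hdl))
+ *		pr_err("sps:BAM registration failed\n");
+ *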
+ */
+int sps_register_bam_device(const struct sps_bam_props *bam_props,
+				unsigned long *dev_handle)
+{
+	struct sps_bam *bam = NULL;
+	void *virt_addr = NULL;
+	char bam_name[MAX_MSG_LEN];
+	u32 manage;
+	int ok;
+	int result;
+
+	if (bam_props == NULL) {
+		SPS_ERR(sps, "sps:%s:bam_props is NULL.\n", __func__);
+		return SPS_ERROR;
+	} else if (dev_handle == NULL) {
+		SPS_ERR(sps, "sps:%s:device handle is NULL.\n", __func__);
+		return SPS_ERROR;
+	}
+
+	if (sps == NULL) {
+		pr_err("sps:%s:sps driver is not ready.\n", __func__);
+		return -EPROBE_DEFER;
+	}
+
+	SPS_DBG3(sps, "sps:%s: Client requests to register BAM %pa.\n",
+		__func__, &bam_props->phys_addr);
+
+	/* BAM-DMA is registered internally during power-up */
+	if ((!sps->is_ready) && !(bam_props->options & SPS_BAM_OPT_BAMDMA)) {
+		SPS_ERR(sps, "sps:%s:sps driver not ready.\n", __func__);
+		return -EAGAIN;
+	}
+
+	/* Check BAM parameters */
+	manage = bam_props->manage & SPS_BAM_MGR_ACCESS_MASK;
+	if (manage != SPS_BAM_MGR_NONE) {
+		if (bam_props->virt_addr == NULL && bam_props->virt_size == 0) {
+			SPS_ERR(sps, "sps:Invalid properties for BAM: %pa",
+					   &bam_props->phys_addr);
+			return SPS_ERROR;
+		}
+	}
+	if ((bam_props->manage & SPS_BAM_MGR_DEVICE_REMOTE) == 0) {
+		/* BAM global is configured by local processor */
+		if (bam_props->summing_threshold == 0) {
+			SPS_ERR(sps,
+				"sps:Invalid device ctrl properties for BAM: %pa",
+				&bam_props->phys_addr);
+			return SPS_ERROR;
+		}
+	}
+	manage = bam_props->manage &
+		  (SPS_BAM_MGR_PIPE_NO_CONFIG | SPS_BAM_MGR_PIPE_NO_CTRL);
+
+	/* In case of error */
+	*dev_handle = SPS_DEV_HANDLE_INVALID;
+	result = SPS_ERROR;
+
+	mutex_lock(&sps->lock);
+	/* Is this BAM already registered? */
+	bam = phy2bam(bam_props->phys_addr);
+	if (bam != NULL) {
+		mutex_unlock(&sps->lock);
+		SPS_ERR(sps, "sps:BAM is already registered: %pa",
+				&bam->props.phys_addr);
+		result = -EEXIST;
+		bam = NULL;   /* Avoid error clean-up kfree(bam) */
+		goto exit_err;
+	}
+
+	/* Perform virtual mapping if required */
+	if ((bam_props->manage & SPS_BAM_MGR_ACCESS_MASK) !=
+	    SPS_BAM_MGR_NONE && bam_props->virt_addr == NULL) {
+		/* Map the memory region */
+		virt_addr = ioremap(bam_props->phys_addr, bam_props->virt_size);
+		if (virt_addr == NULL) {
+			SPS_ERR(sps,
+				"sps:Unable to map BAM IO mem:%pa size:0x%x",
+				&bam_props->phys_addr, bam_props->virt_size);
+			goto exit_err;
+		}
+	}
+
+	bam = kzalloc(sizeof(*bam), GFP_KERNEL);
+	if (bam == NULL) {
+		SPS_ERR(sps,
+			"sps:Unable to allocate BAM device state: size is %zu",
+			sizeof(*bam));
+		goto exit_err;
+	}
+
+	mutex_init(&bam->lock);
+	mutex_lock(&bam->lock);
+
+	/* Copy configuration to BAM device descriptor */
+	bam->props = *bam_props;
+	if (virt_addr != NULL)
+		bam->props.virt_addr = virt_addr;
+
+	snprintf(bam_name, sizeof(bam_name), "sps_bam_%pa_0",
+					&bam->props.phys_addr);
+	bam->ipc_log0 = ipc_log_context_create(SPS_IPC_LOGPAGES,
+							bam_name, 0);
+	if (!bam->ipc_log0)
+		SPS_ERR(sps, "%s : unable to create IPC Logging 0 for bam %pa",
+					__func__, &bam->props.phys_addr);
+
+	snprintf(bam_name, sizeof(bam_name), "sps_bam_%pa_1",
+					&bam->props.phys_addr);
+	bam->ipc_log1 = ipc_log_context_create(SPS_IPC_LOGPAGES,
+							bam_name, 0);
+	if (!bam->ipc_log1)
+		SPS_ERR(sps, "%s : unable to create IPC Logging 1 for bam %pa",
+					__func__, &bam->props.phys_addr);
+
+	snprintf(bam_name, sizeof(bam_name), "sps_bam_%pa_2",
+					&bam->props.phys_addr);
+	bam->ipc_log2 = ipc_log_context_create(SPS_IPC_LOGPAGES,
+							bam_name, 0);
+	if (!bam->ipc_log2)
+		SPS_ERR(sps, "%s : unable to create IPC Logging 2 for bam %pa",
+					__func__, &bam->props.phys_addr);
+
+	snprintf(bam_name, sizeof(bam_name), "sps_bam_%pa_3",
+					&bam->props.phys_addr);
+	bam->ipc_log3 = ipc_log_context_create(SPS_IPC_LOGPAGES,
+							bam_name, 0);
+	if (!bam->ipc_log3)
+		SPS_ERR(sps, "%s : unable to create IPC Logging 3 for bam %pa",
+					__func__, &bam->props.phys_addr);
+
+	snprintf(bam_name, sizeof(bam_name), "sps_bam_%pa_4",
+					&bam->props.phys_addr);
+	bam->ipc_log4 = ipc_log_context_create(SPS_IPC_LOGPAGES,
+							bam_name, 0);
+	if (!bam->ipc_log4)
+		SPS_ERR(sps, "%s : unable to create IPC Logging 4 for bam %pa",
+					__func__, &bam->props.phys_addr);
+
+	if (bam_props->ipc_loglevel)
+		bam->ipc_loglevel = bam_props->ipc_loglevel;
+	else
+		bam->ipc_loglevel = SPS_IPC_DEFAULT_LOGLEVEL;
+
+	ok = sps_bam_device_init(bam);
+	mutex_unlock(&bam->lock);
+	if (ok) {
+		SPS_ERR(bam, "sps:Fail to init BAM device: phys %pa",
+			&bam->props.phys_addr);
+		goto exit_err;
+	}
+
+	/* Add BAM to the list */
+	list_add_tail(&bam->list, &sps->bams_q);
+	*dev_handle = (uintptr_t) bam;
+
+	result = 0;
+exit_err:
+	mutex_unlock(&sps->lock);
+
+	if (result) {
+		if (bam != NULL) {
+			if (virt_addr != NULL)
+				iounmap(bam->props.virt_addr);
+			kfree(bam);
+		}
+
+		return result;
+	}
+
+	/* If this BAM is attached to a BAM-DMA, init the BAM-DMA device */
+#ifdef CONFIG_SPS_SUPPORT_BAMDMA
+	if ((bam->props.options & SPS_BAM_OPT_BAMDMA)) {
+		if (sps_dma_device_init((uintptr_t) bam)) {
+			bam->props.options &= ~SPS_BAM_OPT_BAMDMA;
+			sps_deregister_bam_device((uintptr_t) bam);
+			SPS_ERR(bam, "sps:Fail to init BAM-DMA BAM: phys %pa",
+				&bam->props.phys_addr);
+			return SPS_ERROR;
+		}
+	}
+#endif /* CONFIG_SPS_SUPPORT_BAMDMA */
+
+	SPS_INFO(bam, "sps:BAM %pa is registered.", &bam->props.phys_addr);
+
+	return 0;
+}
+EXPORT_SYMBOL(sps_register_bam_device);
+
+/**
+ * Deregister a BAM device
+ *
+ */
+int sps_deregister_bam_device(unsigned long dev_handle)
+{
+	struct sps_bam *bam;
+	int n;
+
+	if (dev_handle == 0) {
+		SPS_ERR(sps, "sps:%s:device handle should not be 0.\n",
+				__func__);
+		return SPS_ERROR;
+	}
+
+	bam = sps_h2bam(dev_handle);
+	if (bam == NULL) {
+		SPS_ERR(sps, "sps:%s:did not find a BAM for this handle",
+				__func__);
+		return SPS_ERROR;
+	}
+
+	SPS_DBG3(sps, "sps:%s: SPS deregister BAM: phys %pa.",
+		__func__, &bam->props.phys_addr);
+
+	if (bam->props.options & SPS_BAM_HOLD_MEM) {
+		for (n = 0; n < BAM_MAX_PIPES; n++)
+			kfree(bam->desc_cache_pointers[n]);
+	}
+
+	/* If this BAM is attached to a BAM-DMA, de-init the BAM-DMA device */
+#ifdef CONFIG_SPS_SUPPORT_BAMDMA
+	if ((bam->props.options & SPS_BAM_OPT_BAMDMA)) {
+		mutex_lock(&bam->lock);
+		(void)sps_dma_device_de_init((uintptr_t) bam);
+		bam->props.options &= ~SPS_BAM_OPT_BAMDMA;
+		mutex_unlock(&bam->lock);
+	}
+#endif
+
+	/* Remove the BAM from the registration list */
+	mutex_lock(&sps->lock);
+	list_del(&bam->list);
+	mutex_unlock(&sps->lock);
+
+	/* De-init the BAM and free resources */
+	mutex_lock(&bam->lock);
+	sps_bam_device_de_init(bam);
+	mutex_unlock(&bam->lock);
+	ipc_log_context_destroy(bam->ipc_log0);
+	ipc_log_context_destroy(bam->ipc_log1);
+	ipc_log_context_destroy(bam->ipc_log2);
+	ipc_log_context_destroy(bam->ipc_log3);
+	ipc_log_context_destroy(bam->ipc_log4);
+	if (bam->props.virt_size)
+		(void)iounmap(bam->props.virt_addr);
+
+	kfree(bam);
+
+	return 0;
+}
+EXPORT_SYMBOL(sps_deregister_bam_device);
+
+/**
+ * Get processed I/O vector (completed transfers)
+ *
+ */
+int sps_get_iovec(struct sps_pipe *h, struct sps_iovec *iovec)
+{
+	struct sps_pipe *pipe = h;
+	struct sps_bam *bam;
+	int result;
+
+	if (h == NULL) {
+		SPS_ERR(sps, "sps:%s:pipe is NULL.\n", __func__);
+		return SPS_ERROR;
+	} else if (iovec == NULL) {
+		SPS_ERR(sps, "sps:%s:iovec pointer is NULL.\n", __func__);
+		return SPS_ERROR;
+	}
+
+	bam = sps_bam_lock(pipe);
+	if (bam == NULL) {
+		SPS_ERR(sps, "sps:%s:BAM is not found by handle.\n", __func__);
+		return SPS_ERROR;
+	}
+
+	SPS_DBG(bam, "sps:%s; BAM: %pa; pipe index:%d.\n",
+		__func__, BAM_ID(bam), pipe->pipe_index);
+
+	/* Get an iovec from the BAM pipe descriptor FIFO */
+	result = sps_bam_pipe_get_iovec(bam, pipe->pipe_index, iovec);
+	sps_bam_unlock(bam);
+
+	return result;
+}
+EXPORT_SYMBOL(sps_get_iovec);
+
+/**
+ * Perform timer control
+ *
+ */
+int sps_timer_ctrl(struct sps_pipe *h,
+			struct sps_timer_ctrl *timer_ctrl,
+			struct sps_timer_result *timer_result)
+{
+	struct sps_pipe *pipe = h;
+	struct sps_bam *bam;
+	int result;
+
+	if (h == NULL) {
+		SPS_ERR(sps, "sps:%s:pipe is NULL.\n", __func__);
+		return SPS_ERROR;
+	} else if (timer_ctrl == NULL) {
+		SPS_ERR(sps, "sps:%s:timer_ctrl pointer is NULL.\n", __func__);
+		return SPS_ERROR;
+	} else if (timer_result == NULL) {
+		SPS_DBG(sps, "sps:%s:no result to return.\n", __func__);
+	}
+
+	bam = sps_bam_lock(pipe);
+	if (bam == NULL) {
+		SPS_ERR(sps, "sps:%s:BAM is not found by handle.\n", __func__);
+		return SPS_ERROR;
+	}
+
+	SPS_DBG2(bam, "sps:%s; BAM: %pa; pipe index:%d.\n",
+		__func__, BAM_ID(bam), pipe->pipe_index);
+
+	/* Perform the BAM pipe timer control operation */
+	result = sps_bam_pipe_timer_ctrl(bam, pipe->pipe_index, timer_ctrl,
+					 timer_result);
+	sps_bam_unlock(bam);
+
+	return result;
+}
+EXPORT_SYMBOL(sps_timer_ctrl);
+
+/*
+ * Reset a BAM pipe
+ */
+int sps_pipe_reset(unsigned long dev, u32 pipe)
+{
+	struct sps_bam *bam;
+
+	if (!dev) {
+		SPS_ERR(sps, "sps:%s:BAM handle is NULL.\n", __func__);
+		return SPS_ERROR;
+	}
+
+	if (pipe >= BAM_MAX_PIPES) {
+		SPS_ERR(sps, "sps:%s:pipe index is invalid.\n", __func__);
+		return SPS_ERROR;
+	}
+
+	bam = sps_h2bam(dev);
+	if (bam == NULL) {
+		SPS_ERR(sps, "sps:%s:BAM is not found by handle.\n", __func__);
+		return SPS_ERROR;
+	}
+
+	SPS_DBG2(bam, "sps:%s; BAM: %pa; pipe index:%d.\n",
+		__func__, BAM_ID(bam), pipe);
+
+	bam_pipe_reset(&bam->base, pipe);
+
+	return 0;
+}
+EXPORT_SYMBOL(sps_pipe_reset);
+
+/*
+ * Disable a BAM pipe
+ */
+int sps_pipe_disable(unsigned long dev, u32 pipe)
+{
+	struct sps_bam *bam;
+
+	if (!dev) {
+		SPS_ERR(sps, "sps:%s:BAM handle is NULL.\n", __func__);
+		return SPS_ERROR;
+	}
+
+	if (pipe >= BAM_MAX_PIPES) {
+		SPS_ERR(sps, "sps:%s:pipe index is invalid.\n", __func__);
+		return SPS_ERROR;
+	}
+
+	bam = sps_h2bam(dev);
+	if (bam == NULL) {
+		SPS_ERR(sps, "sps:%s:BAM is not found by handle.\n", __func__);
+		return SPS_ERROR;
+	}
+
+	SPS_DBG(bam, "sps:%s; BAM: %pa; pipe index:%d.\n",
+		__func__, BAM_ID(bam), pipe);
+
+	bam_disable_pipe(&bam->base, pipe);
+
+	return 0;
+}
+EXPORT_SYMBOL(sps_pipe_disable);
+
+/*
+ * Check pending descriptors in the descriptor FIFO
+ * of a pipe
+ */
+int sps_pipe_pending_desc(unsigned long dev, u32 pipe, bool *pending)
+{
+	struct sps_bam *bam;
+
+	if (!dev) {
+		SPS_ERR(sps, "sps:%s:BAM handle is NULL.\n", __func__);
+		return SPS_ERROR;
+	}
+
+	if (pipe >= BAM_MAX_PIPES) {
+		SPS_ERR(sps, "sps:%s:pipe index is invalid.\n", __func__);
+		return SPS_ERROR;
+	}
+
+	if (!pending) {
+		SPS_ERR(sps, "sps:%s:input flag is NULL.\n", __func__);
+		return SPS_ERROR;
+	}
+
+	bam = sps_h2bam(dev);
+	if (bam == NULL) {
+		SPS_ERR(sps, "sps:%s:BAM is not found by handle.\n", __func__);
+		return SPS_ERROR;
+	}
+
+	SPS_DBG(bam, "sps:%s; BAM: %pa; pipe index:%d.\n",
+		__func__, BAM_ID(bam), pipe);
+
+	*pending = sps_bam_pipe_pending_desc(bam, pipe);
+
+	return 0;
+}
+EXPORT_SYMBOL(sps_pipe_pending_desc);
+
+/*
+ * Process any pending IRQ of a BAM
+ */
+int sps_bam_process_irq(unsigned long dev)
+{
+	struct sps_bam *bam;
+	int ret = 0;
+
+	if (!dev) {
+		SPS_ERR(sps, "sps:%s:BAM handle is NULL.\n", __func__);
+		return SPS_ERROR;
+	}
+
+	bam = sps_h2bam(dev);
+	if (bam == NULL) {
+		SPS_ERR(sps, "sps:%s:BAM is not found by handle.\n", __func__);
+		return SPS_ERROR;
+	}
+
+	SPS_DBG1(bam, "sps:%s; BAM: %pa.\n", __func__, BAM_ID(bam));
+
+	ret = sps_bam_check_irq(bam);
+
+	return ret;
+}
+EXPORT_SYMBOL(sps_bam_process_irq);
+
+/*
+ * Get address info of a BAM
+ */
+int sps_get_bam_addr(unsigned long dev, phys_addr_t *base,
+				u32 *size)
+{
+	struct sps_bam *bam;
+
+	if (!dev) {
+		SPS_ERR(sps, "sps:%s:BAM handle is NULL.\n", __func__);
+		return SPS_ERROR;
+	}
+
+	bam = sps_h2bam(dev);
+	if (bam == NULL) {
+		SPS_ERR(sps, "sps:%s:BAM is not found by handle.\n", __func__);
+		return SPS_ERROR;
+	}
+
+	*base = bam->props.phys_addr;
+	*size = bam->props.virt_size;
+
+	SPS_DBG2(bam, "sps:%s; BAM: %pa; base:%pa; size:%d.\n",
+		__func__, BAM_ID(bam), base, *size);
+
+	return 0;
+}
+EXPORT_SYMBOL(sps_get_bam_addr);
+
+/*
+ * Inject a ZLT with EOT for a BAM pipe
+ */
+int sps_pipe_inject_zlt(unsigned long dev, u32 pipe_index)
+{
+	struct sps_bam *bam;
+	int rc;
+
+	if (!dev) {
+		SPS_ERR(sps, "sps:%s:BAM handle is NULL.\n", __func__);
+		return SPS_ERROR;
+	}
+
+	if (pipe_index >= BAM_MAX_PIPES) {
+		SPS_ERR(sps, "sps:%s:pipe index is invalid.\n", __func__);
+		return SPS_ERROR;
+	}
+
+	bam = sps_h2bam(dev);
+	if (bam == NULL) {
+		SPS_ERR(sps, "sps:%s:BAM is not found by handle.\n", __func__);
+		return SPS_ERROR;
+	}
+
+	SPS_DBG(bam, "sps:%s; BAM: %pa; pipe index:%d.\n",
+		__func__, BAM_ID(bam), pipe_index);
+
+	rc = sps_bam_pipe_inject_zlt(bam, pipe_index);
+	if (rc)
+		SPS_ERR(bam, "sps:%s:failed to inject a ZLT.\n", __func__);
+
+	return rc;
+}
+EXPORT_SYMBOL(sps_pipe_inject_zlt);
+
+/**
+ * Allocate client state context
+ *
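+ * Example (a sketch): an endpoint's lifetime pairs with
+ * sps_free_endpoint():
+ *
+ *	struct sps_pipe *pipe = sps_alloc_endpoint();
+ *
+ *	if (!pipe)
+ *		return -ENOMEM;
+ *	...sps_connect()/sps_disconnect()...
+ *	sps_free_endpoint(pipe);
+ *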
+ */
+struct sps_pipe *sps_alloc_endpoint(void)
+{
+	struct sps_pipe *ctx = NULL;
+
+	SPS_DBG(sps, "sps:%s.", __func__);
+
+	ctx = kzalloc(sizeof(struct sps_pipe), GFP_KERNEL);
+	if (ctx == NULL) {
+		SPS_ERR(sps, "sps:%s:Fail to allocate pipe context.",
+				__func__);
+		return NULL;
+	}
+
+	sps_client_init(ctx);
+
+	return ctx;
+}
+EXPORT_SYMBOL(sps_alloc_endpoint);
+
+/**
+ * Free client state context
+ *
+ */
+int sps_free_endpoint(struct sps_pipe *ctx)
+{
+	int res;
+
+	SPS_DBG(sps, "sps:%s.", __func__);
+
+	if (ctx == NULL) {
+		SPS_ERR(sps, "sps:%s:pipe is NULL.\n", __func__);
+		return SPS_ERROR;
+	}
+
+	res = sps_client_de_init(ctx);
+
+	if (res == 0)
+		kfree(ctx);
+
+	return res;
+}
+EXPORT_SYMBOL(sps_free_endpoint);
+
+/**
+ * Platform Driver.
+ */
+static int get_platform_data(struct platform_device *pdev)
+{
+	struct resource *resource;
+	struct msm_sps_platform_data *pdata;
+
+	SPS_DBG3(sps, "sps:%s.", __func__);
+
+	pdata = pdev->dev.platform_data;
+
+	if (pdata == NULL) {
+		SPS_ERR(sps, "sps:%s:inavlid platform data.\n", __func__);
+		sps->bamdma_restricted_pipes = 0;
+		return -EINVAL;
+	}
+	sps->bamdma_restricted_pipes = pdata->bamdma_restricted_pipes;
+	SPS_DBG3(sps, "sps:bamdma_restricted_pipes=0x%x.\n",
+			sps->bamdma_restricted_pipes);
+
+	resource  = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						 "pipe_mem");
+	if (resource) {
+		sps->pipemem_phys_base = resource->start;
+		sps->pipemem_size = resource_size(resource);
+		SPS_DBG3(sps, "sps:pipemem.base=%pa,size=0x%x.\n",
+			&sps->pipemem_phys_base,
+			sps->pipemem_size);
+	}
+
+#ifdef CONFIG_SPS_SUPPORT_BAMDMA
+	resource  = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						 "bamdma_bam");
+	if (resource) {
+		sps->bamdma_bam_phys_base = resource->start;
+		sps->bamdma_bam_size = resource_size(resource);
+		SPS_DBG(sps, "sps:bamdma_bam.base=%pa,size=0x%x.",
+			&sps->bamdma_bam_phys_base,
+			sps->bamdma_bam_size);
+	}
+
+	resource  = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						 "bamdma_dma");
+	if (resource) {
+		sps->bamdma_dma_phys_base = resource->start;
+		sps->bamdma_dma_size = resource_size(resource);
+		SPS_DBG(sps, "sps:bamdma_dma.base=%pa,size=0x%x.",
+			&sps->bamdma_dma_phys_base,
+			sps->bamdma_dma_size);
+	}
+
+	resource  = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
+						 "bamdma_irq");
+	if (resource) {
+		sps->bamdma_irq = resource->start;
+		SPS_DBG(sps, "sps:bamdma_irq=%d.", sps->bamdma_irq);
+	}
+#endif
+
+	return 0;
+}
+
+/**
+ * Read data from device tree
+ */
+static int get_device_tree_data(struct platform_device *pdev)
+{
+#ifdef CONFIG_SPS_SUPPORT_BAMDMA
+	struct resource *resource;
+
+	SPS_DBG(sps, "sps:%s.", __func__);
+
+	if (of_property_read_u32((&pdev->dev)->of_node,
+				"qcom,bam-dma-res-pipes",
+				&sps->bamdma_restricted_pipes))
+		SPS_DBG(sps,
+			"sps:%s:No restricted bamdma pipes on this target.\n",
+			__func__);
+	else
+		SPS_DBG(sps, "sps:bamdma_restricted_pipes=0x%x.",
+			sps->bamdma_restricted_pipes);
+
+	resource  = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (resource) {
+		sps->bamdma_bam_phys_base = resource->start;
+		sps->bamdma_bam_size = resource_size(resource);
+		SPS_DBG(sps, "sps:bamdma_bam.base=%pa,size=0x%x.",
+			&sps->bamdma_bam_phys_base,
+			sps->bamdma_bam_size);
+	} else {
+		SPS_ERR(sps, "sps:%s:BAM DMA BAM mem unavailable.", __func__);
+		return -ENODEV;
+	}
+
+	resource  = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	if (resource) {
+		sps->bamdma_dma_phys_base = resource->start;
+		sps->bamdma_dma_size = resource_size(resource);
+		SPS_DBG(sps, "sps:bamdma_dma.base=%pa,size=0x%x.",
+			&sps->bamdma_dma_phys_base,
+			sps->bamdma_dma_size);
+	} else {
+		SPS_ERR(sps, "sps:%s:BAM DMA mem unavailable.", __func__);
+		return -ENODEV;
+	}
+
+	resource = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+	if (resource) {
+		imem = true;
+		sps->pipemem_phys_base = resource->start;
+		sps->pipemem_size = resource_size(resource);
+		SPS_DBG(sps, "sps:pipemem.base=%pa,size=0x%x.",
+			&sps->pipemem_phys_base,
+			sps->pipemem_size);
+	} else {
+		imem = false;
+		SPS_DBG(sps, "sps:%s:No pipe memory on this target.\n",
+				__func__);
+	}
+
+	resource  = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (resource) {
+		sps->bamdma_irq = resource->start;
+		SPS_DBG(sps, "sps:bamdma_irq=%d.", sps->bamdma_irq);
+	} else {
+		SPS_ERR(sps, "sps:%s:BAM DMA IRQ unavailable.", __func__);
+		return -ENODEV;
+	}
+#endif
+
+	if (of_property_read_u32((&pdev->dev)->of_node,
+				"qcom,device-type",
+				&d_type)) {
+		d_type = 3;
+		SPS_DBG3(sps, "sps:default device type %d.\n", d_type);
+	} else {
+		SPS_DBG3(sps, "sps:device type is %d.", d_type);
+	}
+
+	enhd_pipe = of_property_read_bool((&pdev->dev)->of_node,
+			"qcom,pipe-attr-ee");
+	SPS_DBG3(sps, "sps:PIPE_ATTR_EE is %s supported.\n",
+			(enhd_pipe ? "" : "not"));
+
+	return 0;
+}
+
+static const struct of_device_id msm_sps_match[] = {
+	{	.compatible = "qcom,msm_sps",
+		.data = &bam_types[SPS_BAM_NDP]
+	},
+	{	.compatible = "qcom,msm_sps_4k",
+		.data = &bam_types[SPS_BAM_NDP_4K]
+	},
+	{}
+};
+
+static int msm_sps_probe(struct platform_device *pdev)
+{
+	int ret = -ENODEV;
+
+	SPS_DBG3(sps, "sps:%s.", __func__);
+
+	if (pdev->dev.of_node) {
+		const struct of_device_id *match;
+
+		if (get_device_tree_data(pdev)) {
+			SPS_ERR(sps,
+				"sps:%s:Fail to get data from device tree.",
+				__func__);
+			return -ENODEV;
+		}
+		SPS_DBG(sps, "%s", "sps:get data from device tree.");
+
+		match = of_match_device(msm_sps_match, &pdev->dev);
+		if (match) {
+			bam_type = *((enum sps_bam_type *)(match->data));
+			SPS_DBG3(sps, "sps:BAM type is:%d\n", bam_type);
+		} else {
+			bam_type = SPS_BAM_NDP;
+			SPS_DBG3(sps, "sps:use default BAM type:%d\n",
+				bam_type);
+		}
+	} else {
+		d_type = 0;
+		if (get_platform_data(pdev)) {
+			SPS_ERR(sps, "sps:%s:Fail to get platform data.",
+				__func__);
+			return -ENODEV;
+		}
+		SPS_DBG(sps, "%s", "sps:get platform data.");
+		bam_type = SPS_BAM_LEGACY;
+	}
+
+	/* Create Device */
+	sps->dev_class = class_create(THIS_MODULE, SPS_DRV_NAME);
+	if (IS_ERR(sps->dev_class)) {
+		SPS_ERR(sps, "sps:%s:class_create err.", __func__);
+		return PTR_ERR(sps->dev_class);
+	}
+
+	ret = alloc_chrdev_region(&sps->dev_num, 0, 1, SPS_DRV_NAME);
+	if (ret) {
+		SPS_ERR(sps, "sps:%s:alloc_chrdev_region err.", __func__);
+		goto alloc_chrdev_region_err;
+	}
+
+	sps->dev = device_create(sps->dev_class, NULL, sps->dev_num, sps,
+				SPS_DRV_NAME);
+	if (IS_ERR(sps->dev)) {
+		SPS_ERR(sps, "sps:%s:device_create err.", __func__);
+		goto device_create_err;
+	}
+
+	if (pdev->dev.of_node)
+		sps->dev->of_node = pdev->dev.of_node;
+
+	if (!d_type) {
+		sps->pmem_clk = clk_get(sps->dev, "mem_clk");
+		if (IS_ERR(sps->pmem_clk)) {
+			if (PTR_ERR(sps->pmem_clk) == -EPROBE_DEFER)
+				ret = -EPROBE_DEFER;
+			else
+				SPS_ERR(sps, "sps:%s:fail to get pmem_clk.",
+					__func__);
+			goto pmem_clk_err;
+		} else {
+			ret = clk_prepare_enable(sps->pmem_clk);
+			if (ret) {
+				SPS_ERR(sps,
+					"sps:%s:failed to enable pmem_clk.",
+					__func__);
+				goto pmem_clk_en_err;
+			}
+		}
+	}
+
+#ifdef CONFIG_SPS_SUPPORT_BAMDMA
+	sps->dfab_clk = clk_get(sps->dev, "dfab_clk");
+	if (IS_ERR(sps->dfab_clk)) {
+		if (PTR_ERR(sps->dfab_clk) == -EPROBE_DEFER)
+			ret = -EPROBE_DEFER;
+		else
+			SPS_ERR(sps, "sps:%s:fail to get dfab_clk.", __func__);
+		goto dfab_clk_err;
+	} else {
+		ret = clk_set_rate(sps->dfab_clk, 64000000);
+		if (ret) {
+			SPS_ERR(sps, "sps:%s:failed to set dfab_clk rate.",
+				__func__);
+			clk_put(sps->dfab_clk);
+			goto dfab_clk_err;
+		}
+	}
+
+	sps->bamdma_clk = clk_get(sps->dev, "dma_bam_pclk");
+	if (IS_ERR(sps->bamdma_clk)) {
+		if (PTR_ERR(sps->bamdma_clk) == -EPROBE_DEFER)
+			ret = -EPROBE_DEFER;
+		else
+			SPS_ERR(sps, "sps:%s:fail to get bamdma_clk.",
+				__func__);
+		clk_put(sps->dfab_clk);
+		goto dfab_clk_err;
+	} else {
+		ret = clk_prepare_enable(sps->bamdma_clk);
+		if (ret) {
+			SPS_ERR(sps, "sps:failed to enable bamdma_clk. ret=%d",
+									ret);
+			clk_put(sps->bamdma_clk);
+			clk_put(sps->dfab_clk);
+			goto dfab_clk_err;
+		}
+	}
+
+	ret = clk_prepare_enable(sps->dfab_clk);
+	if (ret) {
+		SPS_ERR(sps, "sps:failed to enable dfab_clk. ret=%d", ret);
+		clk_disable_unprepare(sps->bamdma_clk);
+		clk_put(sps->bamdma_clk);
+		clk_put(sps->dfab_clk);
+		goto dfab_clk_err;
+	}
+#endif
+	ret = sps_device_init();
+	if (ret) {
+		SPS_ERR(sps, "sps:%s:sps_device_init err.", __func__);
+
+#ifdef CONFIG_SPS_SUPPORT_BAMDMA
+		clk_disable_unprepare(sps->dfab_clk);
+		clk_disable_unprepare(sps->bamdma_clk);
+		clk_put(sps->bamdma_clk);
+		clk_put(sps->dfab_clk);
+#endif
+		goto dfab_clk_err;
+	}
+#ifdef CONFIG_SPS_SUPPORT_BAMDMA
+	clk_disable_unprepare(sps->dfab_clk);
+	clk_disable_unprepare(sps->bamdma_clk);
+#endif
+	sps->is_ready = true;
+
+	SPS_INFO(sps, "%s", "sps:sps is ready.\n");
+
+	return 0;
+dfab_clk_err:
+	if (!d_type)
+		clk_disable_unprepare(sps->pmem_clk);
+pmem_clk_en_err:
+	if (!d_type)
+		clk_put(sps->pmem_clk);
+pmem_clk_err:
+	device_destroy(sps->dev_class, sps->dev_num);
+device_create_err:
+	unregister_chrdev_region(sps->dev_num, 1);
+alloc_chrdev_region_err:
+	class_destroy(sps->dev_class);
+
+	return ret;
+}
+
+static int msm_sps_remove(struct platform_device *pdev)
+{
+	SPS_DBG3(sps, "sps:%s.\n", __func__);
+
+	device_destroy(sps->dev_class, sps->dev_num);
+	unregister_chrdev_region(sps->dev_num, 1);
+	class_destroy(sps->dev_class);
+	sps_device_de_init();
+
+#ifdef CONFIG_SPS_SUPPORT_BAMDMA
+	clk_put(sps->dfab_clk);
+	clk_put(sps->bamdma_clk);
+#endif
+	if (!d_type)
+		clk_put(sps->pmem_clk);
+
+	return 0;
+}
+
+static struct platform_driver msm_sps_driver = {
+	.probe          = msm_sps_probe,
+	.driver		= {
+		.name	= SPS_DRV_NAME,
+		.owner	= THIS_MODULE,
+		.of_match_table = msm_sps_match,
+	},
+	.remove		= msm_sps_remove,
+};
+
+/**
+ * Module Init.
+ */
+static int __init sps_init(void)
+{
+	int ret;
+
+#ifdef CONFIG_DEBUG_FS
+	sps_debugfs_init();
+#endif
+
+	pr_debug("sps:%s.", __func__);
+
+	/* Allocate the SPS driver state struct */
+	sps = kzalloc(sizeof(*sps), GFP_KERNEL);
+	if (sps == NULL)
+		return -ENOMEM;
+
+	sps->ipc_log0 = ipc_log_context_create(SPS_IPC_LOGPAGES,
+							"sps_ipc_log0", 0);
+	if (!sps->ipc_log0)
+		pr_err("Failed to create IPC log0\n");
+	sps->ipc_log1 = ipc_log_context_create(SPS_IPC_LOGPAGES,
+							"sps_ipc_log1", 0);
+	if (!sps->ipc_log1)
+		pr_err("Failed to create IPC log1\n");
+	sps->ipc_log2 = ipc_log_context_create(SPS_IPC_LOGPAGES,
+							"sps_ipc_log2", 0);
+	if (!sps->ipc_log2)
+		pr_err("Failed to create IPC log2\n");
+	sps->ipc_log3 = ipc_log_context_create(SPS_IPC_LOGPAGES,
+							"sps_ipc_log3", 0);
+	if (!sps->ipc_log3)
+		pr_err("Failed to create IPC log3\n");
+	sps->ipc_log4 = ipc_log_context_create(SPS_IPC_LOGPAGES *
+				SPS_IPC_REG_DUMP_FACTOR, "sps_ipc_log4", 0);
+	if (!sps->ipc_log4)
+		pr_err("Failed to create IPC log4\n");
+
+	ret = platform_driver_register(&msm_sps_driver);
+
+	return ret;
+}
+
+/**
+ * Module Exit.
+ */
+static void __exit sps_exit(void)
+{
+	pr_debug("sps:%s.", __func__);
+
+	platform_driver_unregister(&msm_sps_driver);
+
+	if (sps != NULL) {
+		kfree(sps);
+		sps = NULL;
+	}
+
+#ifdef CONFIG_DEBUG_FS
+	sps_debugfs_exit();
+#endif
+}
+
+arch_initcall(sps_init);
+module_exit(sps_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Smart Peripheral Switch (SPS)");
diff --git a/drivers/platform/msm/sps/sps_bam.c b/drivers/platform/msm/sps/sps_bam.c
new file mode 100644
index 0000000..be4a2cc
--- /dev/null
+++ b/drivers/platform/msm/sps/sps_bam.c
@@ -0,0 +1,2504 @@
+/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/types.h>	/* u32 */
+#include <linux/kernel.h>	/* pr_info() */
+#include <linux/mutex.h>	/* mutex */
+#include <linux/list.h>		/* list_head */
+#include <linux/slab.h>		/* kzalloc() */
+#include <linux/interrupt.h>	/* request_irq() */
+#include <linux/memory.h>	/* memset */
+#include <linux/vmalloc.h>
+
+#include "sps_bam.h"
+#include "bam.h"
+#include "spsi.h"
+
+/* All BAM global IRQ sources */
+#define BAM_IRQ_ALL (BAM_DEV_IRQ_HRESP_ERROR | BAM_DEV_IRQ_ERROR |   \
+	BAM_DEV_IRQ_TIMER)
+
+/* BAM device state flags */
+#define BAM_STATE_INIT     (1UL << 1)
+#define BAM_STATE_IRQ      (1UL << 2)
+#define BAM_STATE_ENABLED  (1UL << 3)
+#define BAM_STATE_BAM2BAM  (1UL << 4)
+#define BAM_STATE_MTI      (1UL << 5)
+#define BAM_STATE_REMOTE   (1UL << 6)
+
+/* Mask for valid hardware descriptor flags */
+#define BAM_IOVEC_FLAG_MASK   \
+	(SPS_IOVEC_FLAG_INT | SPS_IOVEC_FLAG_EOT | SPS_IOVEC_FLAG_EOB |   \
+	SPS_IOVEC_FLAG_NWD | SPS_IOVEC_FLAG_CMD | SPS_IOVEC_FLAG_LOCK |   \
+	SPS_IOVEC_FLAG_UNLOCK | SPS_IOVEC_FLAG_IMME)
+
+/* Mask for invalid BAM-to-BAM pipe options */
+#define BAM2BAM_O_INVALID   \
+	(SPS_O_DESC_DONE | \
+	 SPS_O_EOT | \
+	 SPS_O_POLL | \
+	 SPS_O_NO_Q | \
+	 SPS_O_ACK_TRANSFERS)
+
+/**
+ * Pipe/client pointer value indicating pipe is allocated, but no client has
+ * been assigned
+ */
+#define BAM_PIPE_UNASSIGNED   ((struct sps_pipe *)((~0x0ul) - 0x88888888))
+
+/* Check whether pipe has been assigned */
+#define BAM_PIPE_IS_ASSIGNED(p)  \
+	(((p) != NULL) && ((p) != BAM_PIPE_UNASSIGNED))
+
+/* Is MTI use supported for a specific BAM version? */
+#define BAM_VERSION_MTI_SUPPORT(ver)   ((ver) <= 2)
+
+/* Event option<->event translation table entry */
+struct sps_bam_opt_event_table {
+	enum sps_event event_id;
+	enum sps_option option;
+	enum bam_pipe_irq pipe_irq;
+};
+
+static const struct sps_bam_opt_event_table opt_event_table[] = {
+	{SPS_EVENT_EOT, SPS_O_EOT, BAM_PIPE_IRQ_EOT},
+	{SPS_EVENT_DESC_DONE, SPS_O_DESC_DONE, BAM_PIPE_IRQ_DESC_INT},
+	{SPS_EVENT_WAKEUP, SPS_O_WAKEUP, BAM_PIPE_IRQ_WAKE},
+	{SPS_EVENT_INACTIVE, SPS_O_INACTIVE, BAM_PIPE_IRQ_TIMER},
+	{SPS_EVENT_OUT_OF_DESC, SPS_O_OUT_OF_DESC,
+		BAM_PIPE_IRQ_OUT_OF_DESC},
+	{SPS_EVENT_ERROR, SPS_O_ERROR, BAM_PIPE_IRQ_ERROR},
+	{SPS_EVENT_RST_ERROR, SPS_O_RST_ERROR, BAM_PIPE_IRQ_RST_ERROR},
+	{SPS_EVENT_HRESP_ERROR, SPS_O_HRESP_ERROR, BAM_PIPE_IRQ_HRESP_ERROR}
+};
+
+/* Pipe event source handler */
+static void pipe_handler(struct sps_bam *dev,
+			struct sps_pipe *pipe);
+
+/**
+ * Pipe transfer event (EOT, DESC_DONE) source handler.
+ * This function is called by pipe_handler() and other functions to process the
+ * descriptor FIFO.
+ */
+static void pipe_handler_eot(struct sps_bam *dev,
+			   struct sps_pipe *pipe);
+
+/**
+ * BAM driver initialization
+ */
+int sps_bam_driver_init(u32 options)
+{
+	int n;
+
+	/*
+	 * Check that SPS_O_ and BAM_PIPE_IRQ_ values are identical.
+	 * This is required so that the raw pipe IRQ status can be passed
+	 * to the client in the SPS_EVENT_IRQ.
+	 */
+	for (n = 0; n < ARRAY_SIZE(opt_event_table); n++) {
+		if ((u32)opt_event_table[n].option !=
+			(u32)opt_event_table[n].pipe_irq) {
+			SPS_ERR(sps, "sps:SPS_O 0x%x != HAL IRQ 0x%x\n",
+				opt_event_table[n].option,
+				opt_event_table[n].pipe_irq);
+			return SPS_ERROR;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Check BAM interrupt
+ */
+int sps_bam_check_irq(struct sps_bam *dev)
+{
+	struct sps_pipe *pipe;
+	u32 source;
+	unsigned long flags = 0;
+	int ret = 0;
+
+	SPS_DBG1(dev, "sps:%s:bam=%pa.\n", __func__, BAM_ID(dev));
+
+	spin_lock_irqsave(&dev->isr_lock, flags);
+
+polling:
+	/* Get BAM interrupt source(s) */
+	if ((dev->state & BAM_STATE_MTI) == 0) {
+		u32 mask = dev->pipe_active_mask;
+		enum sps_callback_case cb_case;
+
+		source = bam_check_irq_source(&dev->base, dev->props.ee,
+						mask, &cb_case);
+
+		SPS_DBG1(dev, "sps:bam=%pa;source=0x%x;mask=0x%x.\n",
+				BAM_ID(dev), source, mask);
+
+		if ((source == 0) &&
+			(dev->props.options & SPS_BAM_RES_CONFIRM)) {
+			SPS_DBG2(dev,
+				"sps: BAM %pa has no source (source = 0x%x).\n",
+				BAM_ID(dev), source);
+
+			spin_unlock_irqrestore(&dev->isr_lock, flags);
+			return SPS_ERROR;
+		}
+
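+		/*
+		 * Bit 31 of the IRQ source mask flags the global (top-level)
+		 * BAM interrupt rather than a pipe; hand it to the client
+		 * callback.
+		 */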
+		if ((source & (1UL << 31)) && (dev->props.callback)) {
+			SPS_DBG1(dev, "sps:bam=%pa;callback for case %d.\n",
+				BAM_ID(dev), cb_case);
+			dev->props.callback(cb_case, dev->props.user);
+		}
+
+		/* Mask any non-local source */
+		source &= dev->pipe_active_mask;
+	} else {
+		/* If MTIs are used, must poll each active pipe */
+		source = dev->pipe_active_mask;
+
+		SPS_DBG1(dev, "sps:MTI:bam=%pa;source=0x%x.\n",
+				BAM_ID(dev), source);
+	}
+
+	/* Process active pipe sources */
+	list_for_each_entry(pipe, &dev->pipes_q, list) {
+		/* Check this pipe's bit in the source mask */
+		if (BAM_PIPE_IS_ASSIGNED(pipe)
+				&& (!pipe->disconnecting)
+				&& (source & pipe->pipe_index_mask)) {
+			/* This pipe has an interrupt pending */
+			pipe_handler(dev, pipe);
+			source &= ~pipe->pipe_index_mask;
+		}
+		if (source == 0)
+			break;
+	}
+
+	/* Process any inactive pipe sources */
+	if (source) {
+		SPS_ERR(dev, "sps:IRQ from BAM %pa inactive pipe(s) 0x%x\n",
+			BAM_ID(dev), source);
+		dev->irq_from_disabled_pipe++;
+	}
+
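+	/*
+	 * When the client gates register access behind a resource vote
+	 * (SPS_BAM_RES_CONFIRM), a new interrupt can land while the sources
+	 * above were being serviced; re-read the source register and loop
+	 * back until it is clear.
+	 */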
+	if (dev->props.options & SPS_BAM_RES_CONFIRM) {
+		u32 mask = dev->pipe_active_mask;
+		enum sps_callback_case cb_case;
+
+		source = bam_check_irq_source(&dev->base, dev->props.ee,
+						mask, &cb_case);
+
+		SPS_DBG1(dev,
+			"sps:check if there is any new IRQ coming:bam=%pa;source=0x%x;mask=0x%x.\n",
+				BAM_ID(dev), source, mask);
+
+		if ((source & (1UL << 31)) && (dev->props.callback)) {
+			SPS_DBG1(dev, "sps:bam=%pa;callback for case %d.\n",
+				BAM_ID(dev), cb_case);
+			dev->props.callback(cb_case, dev->props.user);
+		}
+
+		if (source)
+			goto polling;
+	}
+
+	spin_unlock_irqrestore(&dev->isr_lock, flags);
+
+	return ret;
+}
+
+/**
+ * BAM interrupt service routine
+ *
+ * @irq - IRQ number
+ *
+ * @ctxt - pointer to ISR's registered argument
+ *
+ * @return IRQ_HANDLED
+ */
+static irqreturn_t bam_isr(int irq, void *ctxt)
+{
+	struct sps_bam *dev = ctxt;
+
+	SPS_DBG1(dev, "sps:bam_isr: bam:%pa; IRQ #:%d.\n",
+		BAM_ID(dev), irq);
+
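+	/*
+	 * With SPS_BAM_RES_CONFIRM the BAM's resources may be off when the
+	 * IRQ fires, so ask the client (SPS_CALLBACK_BAM_RES_REQ) whether the
+	 * hardware is ready before touching registers, and release the vote
+	 * (SPS_CALLBACK_BAM_RES_REL) afterwards.
+	 */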
+	if (dev->props.options & SPS_BAM_RES_CONFIRM) {
+		if (dev->props.callback) {
+			bool ready = false;
+
+			dev->props.callback(SPS_CALLBACK_BAM_RES_REQ, &ready);
+			if (ready) {
+				SPS_DBG1(dev,
+					"sps:bam_isr: handle IRQ for bam:%pa IRQ #:%d.\n",
+					BAM_ID(dev), irq);
+				if (sps_bam_check_irq(dev))
+					SPS_DBG2(dev,
+						"sps:bam_isr: callback bam:%pa IRQ #:%d to poll the pipes.\n",
+						BAM_ID(dev), irq);
+				dev->props.callback(SPS_CALLBACK_BAM_RES_REL,
+							&ready);
+			} else {
+				SPS_DBG1(dev,
+					"sps:bam_isr: BAM is not ready and thus skip IRQ for bam:%pa IRQ #:%d.\n",
+					BAM_ID(dev), irq);
+			}
+		} else {
+			SPS_ERR(dev,
+				"sps:Client of BAM %pa requires confirmation but has not registered a callback\n",
+				BAM_ID(dev));
+		}
+	} else {
+		sps_bam_check_irq(dev);
+	}
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * BAM device enable
+ */
+int sps_bam_enable(struct sps_bam *dev)
+{
+	u32 num_pipes;
+	u32 irq_mask;
+	int result = 0;
+	int rc;
+	int MTIenabled;
+
+	/* Is this BAM enabled? */
+	if ((dev->state & BAM_STATE_ENABLED))
+		return 0;	/* Yes, so no work to do */
+
+	/* Is there any access to this BAM? */
+	if ((dev->props.manage & SPS_BAM_MGR_ACCESS_MASK) == SPS_BAM_MGR_NONE) {
+		SPS_ERR(dev, "sps:No local access to BAM %pa\n", BAM_ID(dev));
+		return SPS_ERROR;
+	}
+
+	/* Set interrupt handling */
+	if ((dev->props.options & SPS_BAM_OPT_IRQ_DISABLED) != 0 ||
+	    dev->props.irq == SPS_IRQ_INVALID) {
+		/* Disable the BAM interrupt */
+		irq_mask = 0;
+		dev->state &= ~BAM_STATE_IRQ;
+	} else {
+		/* Register BAM ISR */
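+		/*
+		 * Clients that gate register access behind a resource vote
+		 * (SPS_BAM_RES_CONFIRM) get an edge-triggered IRQ, presumably
+		 * so that a level held while the BAM's resources are down
+		 * cannot storm; all other BAMs use the conventional level
+		 * trigger.
+		 */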
+		if (dev->props.irq > 0) {
+			if (dev->props.options & SPS_BAM_RES_CONFIRM) {
+				result = request_irq(dev->props.irq, bam_isr,
+					IRQF_TRIGGER_RISING, "sps", dev);
+				SPS_DBG3(dev,
+					"sps:BAM %pa uses edge for IRQ# %d\n",
+					BAM_ID(dev), dev->props.irq);
+			} else {
+				result = request_irq(dev->props.irq, bam_isr,
+					IRQF_TRIGGER_HIGH, "sps", dev);
+				SPS_DBG3(dev,
+					"sps:BAM %pa uses level for IRQ# %d\n",
+					BAM_ID(dev), dev->props.irq);
+			}
+		} else {
+			SPS_DBG3(dev,
+				"sps:BAM %pa does not have a valid IRQ# %d\n",
+				BAM_ID(dev), dev->props.irq);
+		}
+
+		if (result) {
+			SPS_ERR(dev, "sps:Failed to enable BAM %pa IRQ %d\n",
+				BAM_ID(dev), dev->props.irq);
+			return SPS_ERROR;
+		}
+
+		/* Enable the BAM interrupt */
+		irq_mask = BAM_IRQ_ALL;
+		dev->state |= BAM_STATE_IRQ;
+
+		/* Register BAM IRQ for apps wakeup */
+		if (dev->props.options & SPS_BAM_OPT_IRQ_WAKEUP) {
+			result = enable_irq_wake(dev->props.irq);
+
+			if (result) {
+				SPS_ERR(dev,
+					"sps:Fail to enable wakeup irq for BAM %pa IRQ %d\n",
+					BAM_ID(dev), dev->props.irq);
+				return SPS_ERROR;
+			}
+			SPS_DBG3(dev,
+				"sps:Enable wakeup irq for BAM %pa IRQ %d\n",
+				BAM_ID(dev), dev->props.irq);
+		}
+	}
+
+	/* Is global BAM control managed by the local processor? */
+	num_pipes = 0;
+	if ((dev->props.manage & SPS_BAM_MGR_DEVICE_REMOTE) == 0)
+		/* Yes, so initialize the BAM device */
+		rc = bam_init(&dev->base,
+				  dev->props.ee,
+				  (u16) dev->props.summing_threshold,
+				  irq_mask,
+				  &dev->version, &num_pipes,
+				  dev->props.options);
+	else
+		/* No, so just verify that it is enabled */
+		rc = bam_check(&dev->base, &dev->version,
+				dev->props.ee, &num_pipes);
+
+	if (rc) {
+		SPS_ERR(dev, "sps:Fail to init BAM %pa IRQ %d\n",
+			BAM_ID(dev), dev->props.irq);
+		return SPS_ERROR;
+	}
+
+	/* Check if this BAM supports MTIs (Message Triggered Interrupts) or
+	 * multiple EEs (Execution Environments).
+	 * MTI and EE support are mutually exclusive.
+	 */
+	MTIenabled = BAM_VERSION_MTI_SUPPORT(dev->version);
+
+	if ((dev->props.manage & SPS_BAM_MGR_DEVICE_REMOTE) != 0 &&
+			(dev->props.manage & SPS_BAM_MGR_MULTI_EE) != 0 &&
+			dev->props.ee == 0 && MTIenabled) {
+		/*
+		 * BAM global is owned by remote processor and local processor
+		 * must use MTI. Thus, force EE index to a non-zero value to
+		 * ensure that EE zero globals can't be modified.
+		 */
+		SPS_ERR(dev,
+			"sps:%s:EE for satellite BAM must be set to non-zero.\n",
+			__func__);
+		return SPS_ERROR;
+	}
+
+	/*
+	 * Enable MTI use (message triggered interrupt)
+	 * if local processor does not control the global BAM config
+	 * and this BAM supports MTIs.
+	 */
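+	/*
+	 * An MTI is delivered by the BAM writing to a configured generation
+	 * address (irq_gen_addr) instead of asserting a dedicated interrupt
+	 * line, which is why the address must be valid before the MTI state
+	 * is entered.
+	 */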
+	if ((dev->state & BAM_STATE_IRQ) != 0 &&
+		(dev->props.manage & SPS_BAM_MGR_DEVICE_REMOTE) != 0 &&
+		MTIenabled) {
+		if (dev->props.irq_gen_addr == 0 ||
+		    dev->props.irq_gen_addr == SPS_ADDR_INVALID) {
+			SPS_ERR(dev,
+				"sps:MTI destination address not specified for BAM %pa\n",
+				BAM_ID(dev));
+			return SPS_ERROR;
+		}
+		dev->state |= BAM_STATE_MTI;
+	}
+
+	if (num_pipes) {
+		dev->props.num_pipes = num_pipes;
+		SPS_DBG3(dev,
+			"sps:BAM %pa number of pipes reported by hw: %d\n",
+				 BAM_ID(dev), dev->props.num_pipes);
+	}
+
+	/* Check EE index */
+	if (!MTIenabled && dev->props.ee >= SPS_BAM_NUM_EES) {
+		SPS_ERR(dev, "sps:Invalid EE BAM %pa: %d\n", BAM_ID(dev),
+				dev->props.ee);
+		return SPS_ERROR;
+	}
+
+	/*
+	 * Process EE configuration parameters,
+	 * if specified in the properties
+	 */
+	if (!MTIenabled && dev->props.sec_config == SPS_BAM_SEC_DO_CONFIG) {
+		struct sps_bam_sec_config_props *p_sec =
+						dev->props.p_sec_config_props;
+		if (p_sec == NULL) {
+			SPS_ERR(dev,
+				"sps:EE config table is not specified for BAM %pa\n",
+				BAM_ID(dev));
+			return SPS_ERROR;
+		}
+
+		/*
+		 * Set restricted pipes based on the pipes assigned to local EE
+		 */
+		dev->props.restricted_pipes =
+					~p_sec->ees[dev->props.ee].pipe_mask;
+
+		/*
+		 * If local processor manages the BAM, perform the EE
+		 * configuration
+		 */
+		if ((dev->props.manage & SPS_BAM_MGR_DEVICE_REMOTE) == 0) {
+			u32 ee;
+			u32 pipe_mask;
+			int n, i;
+
+			/*
+			 * Verify that there are no overlapping pipe
+			 * assignments
+			 */
+			for (n = 0; n < SPS_BAM_NUM_EES - 1; n++) {
+				for (i = n + 1; i < SPS_BAM_NUM_EES; i++) {
+					if ((p_sec->ees[n].pipe_mask &
+						p_sec->ees[i].pipe_mask) != 0) {
+						SPS_ERR(dev,
+							"sps:Overlapping pipe assignments for BAM %pa: EEs %d and %d\n",
+							BAM_ID(dev), n, i);
+						return SPS_ERROR;
+					}
+				}
+			}
+
+			for (ee = 0; ee < SPS_BAM_NUM_EES; ee++) {
+				/*
+				 * MSbit specifies EE for the global (top-level)
+				 * BAM interrupt
+				 */
+				pipe_mask = p_sec->ees[ee].pipe_mask;
+				if (ee == dev->props.ee)
+					pipe_mask |= (1UL << 31);
+				else
+					pipe_mask &= ~(1UL << 31);
+
+				bam_security_init(&dev->base, ee,
+						p_sec->ees[ee].vmid, pipe_mask);
+			}
+		}
+	}
+
+	/*
+	 * If local processor manages the BAM and the BAM supports MTIs
+	 * but does not support multiple EEs, set all restricted pipes
+	 * to MTI mode.
+	 */
+	if ((dev->props.manage & SPS_BAM_MGR_DEVICE_REMOTE) == 0
+			&& MTIenabled) {
+		u32 pipe_index;
+		u32 pipe_mask;
+
+		for (pipe_index = 0, pipe_mask = 1;
+		    pipe_index < dev->props.num_pipes;
+		    pipe_index++, pipe_mask <<= 1) {
+			if ((pipe_mask & dev->props.restricted_pipes) == 0)
+				continue;	/* This is a local pipe */
+
+			/*
+			 * Enable MTI with destination address of zero
+			 * (and source mask zero). Pipe is in reset,
+			 * so no interrupt will be generated.
+			 */
+			bam_pipe_satellite_mti(&dev->base, pipe_index, 0,
+						       dev->props.ee);
+		}
+	}
+
+	dev->state |= BAM_STATE_ENABLED;
+
+	if (!dev->props.constrained_logging || dev->props.logging_number) {
+		if (dev->props.logging_number > 0)
+			dev->props.logging_number--;
+		SPS_INFO(dev,
+			"sps:BAM %pa (va:0x%p) enabled: ver:0x%x, number of pipes:%d\n",
+			BAM_ID(dev), dev->base, dev->version,
+			dev->props.num_pipes);
+	} else
+		SPS_DBG3(dev,
+			"sps:BAM %pa (va:0x%p) enabled: ver:0x%x, number of pipes:%d\n",
+			BAM_ID(dev), dev->base, dev->version,
+			dev->props.num_pipes);
+
+	return 0;
+}
+
+/**
+ * BAM device disable
+ *
+ */
+int sps_bam_disable(struct sps_bam *dev)
+{
+	if ((dev->state & BAM_STATE_ENABLED) == 0)
+		return 0;
+
+	/* Is there any access to this BAM? */
+	if ((dev->props.manage & SPS_BAM_MGR_ACCESS_MASK) == SPS_BAM_MGR_NONE) {
+		SPS_ERR(dev, "sps:No local access to BAM %pa\n", BAM_ID(dev));
+		return SPS_ERROR;
+	}
+
+	/* Is this BAM controlled by the local processor? */
+	if ((dev->props.manage & SPS_BAM_MGR_DEVICE_REMOTE)) {
+		/* No, so just mark it disabled */
+		dev->state &= ~BAM_STATE_ENABLED;
+		if ((dev->state & BAM_STATE_IRQ) && (dev->props.irq > 0)) {
+			free_irq(dev->props.irq, dev);
+			dev->state &= ~BAM_STATE_IRQ;
+		}
+		return 0;
+	}
+
+	/* Disable BAM (interrupts) */
+	if ((dev->state & BAM_STATE_IRQ)) {
+		bam_exit(&dev->base, dev->props.ee);
+
+		/* Deregister BAM ISR */
+		if (dev->props.irq > 0)
+			free_irq(dev->props.irq, dev);
+		dev->state &= ~BAM_STATE_IRQ;
+	}
+
+	dev->state &= ~BAM_STATE_ENABLED;
+
+	SPS_DBG3(dev, "sps:BAM %pa disabled\n", BAM_ID(dev));
+
+	return 0;
+}
+
+/**
+ * BAM device initialization
+ */
+int sps_bam_device_init(struct sps_bam *dev)
+{
+	if (dev->props.virt_addr == NULL) {
+		SPS_ERR(dev, "sps:%s:NULL BAM virtual address\n", __func__);
+		return SPS_ERROR;
+	}
+	dev->base = (void *) dev->props.virt_addr;
+
+	if (dev->props.num_pipes == 0) {
+		/* Assume max number of pipes until BAM registers can be read */
+		dev->props.num_pipes = BAM_MAX_PIPES;
+		SPS_DBG3(dev, "sps:BAM %pa: assuming max number of pipes: %d\n",
+			BAM_ID(dev), dev->props.num_pipes);
+	}
+
+	/* Init BAM state data */
+	dev->state = 0;
+	dev->pipe_active_mask = 0;
+	dev->pipe_remote_mask = 0;
+	INIT_LIST_HEAD(&dev->pipes_q);
+
+	spin_lock_init(&dev->isr_lock);
+
+	spin_lock_init(&dev->connection_lock);
+
+	if ((dev->props.options & SPS_BAM_OPT_ENABLE_AT_BOOT))
+		if (sps_bam_enable(dev)) {
+			SPS_ERR(dev, "sps:%s:Fail to enable bam device\n",
+					__func__);
+			return SPS_ERROR;
+		}
+
+	SPS_DBG3(dev, "sps:BAM device: phys %pa IRQ %d\n",
+			BAM_ID(dev), dev->props.irq);
+
+	return 0;
+}
+
+/**
+ * BAM device de-initialization
+ *
+ */
+int sps_bam_device_de_init(struct sps_bam *dev)
+{
+	int result;
+
+	SPS_DBG3(dev, "sps:BAM device DEINIT: phys %pa IRQ %d\n",
+		BAM_ID(dev), dev->props.irq);
+
+	result = sps_bam_disable(dev);
+
+	return result;
+}
+
+/**
+ * BAM device reset
+ *
+ */
+int sps_bam_reset(struct sps_bam *dev)
+{
+	struct sps_pipe *pipe;
+	u32 pipe_index;
+	int result;
+
+	SPS_DBG3(dev, "sps:BAM device RESET: phys %pa IRQ %d\n",
+		BAM_ID(dev), dev->props.irq);
+
+	/* If BAM is enabled, then disable */
+	result = 0;
+	if ((dev->state & BAM_STATE_ENABLED)) {
+		/* Verify that no pipes are currently allocated */
+		for (pipe_index = 0; pipe_index < dev->props.num_pipes;
+		      pipe_index++) {
+			pipe = dev->pipes[pipe_index];
+			if (BAM_PIPE_IS_ASSIGNED(pipe)) {
+				SPS_ERR(dev,
+					"sps:BAM device %pa RESET failed: pipe %d in use\n",
+					BAM_ID(dev), pipe_index);
+				result = SPS_ERROR;
+				break;
+			}
+		}
+
+		if (result == 0)
+			result = sps_bam_disable(dev);
+	}
+
+	/* BAM will be reset as part of the enable process */
+	if (result == 0)
+		result = sps_bam_enable(dev);
+
+	return result;
+}
+
+/**
+ * Clear the BAM pipe state struct
+ *
+ * This function clears the BAM pipe state struct.
+ *
+ * @pipe - pointer to client pipe struct
+ *
+ */
+static void pipe_clear(struct sps_pipe *pipe)
+{
+	INIT_LIST_HEAD(&pipe->list);
+
+	pipe->state = 0;
+	pipe->pipe_index = SPS_BAM_PIPE_INVALID;
+	pipe->pipe_index_mask = 0;
+	pipe->irq_mask = 0;
+	pipe->mode = -1;
+	pipe->num_descs = 0;
+	pipe->desc_size = 0;
+	pipe->disconnecting = false;
+	pipe->late_eot = false;
+	memset(&pipe->sys, 0, sizeof(pipe->sys));
+	INIT_LIST_HEAD(&pipe->sys.events_q);
+}
+
+/**
+ * Allocate a BAM pipe
+ *
+ */
+u32 sps_bam_pipe_alloc(struct sps_bam *dev, u32 pipe_index)
+{
+	u32 pipe_mask;
+
+	if (pipe_index == SPS_BAM_PIPE_INVALID) {
+		/* Allocate a pipe from the BAM */
+		if ((dev->props.manage & SPS_BAM_MGR_PIPE_NO_ALLOC)) {
+			SPS_ERR(dev,
+				"sps:Restricted from allocating pipes on BAM %pa\n",
+				BAM_ID(dev));
+			return SPS_BAM_PIPE_INVALID;
+		}
+		for (pipe_index = 0, pipe_mask = 1;
+		    pipe_index < dev->props.num_pipes;
+		    pipe_index++, pipe_mask <<= 1) {
+			if ((pipe_mask & dev->props.restricted_pipes))
+				continue;	/* This is a restricted pipe */
+
+			if (dev->pipes[pipe_index] == NULL)
+				break;	/* Found an available pipe */
+		}
+		if (pipe_index >= dev->props.num_pipes) {
+			SPS_ERR(dev, "sps:Fail to allocate pipe on BAM %pa\n",
+				BAM_ID(dev));
+			return SPS_BAM_PIPE_INVALID;
+		}
+	} else {
+		/* Check that client-specified pipe is available */
+		if (pipe_index >= dev->props.num_pipes) {
+			SPS_ERR(dev,
+				"sps:Invalid pipe %d for allocate on BAM %pa\n",
+				pipe_index, BAM_ID(dev));
+			return SPS_BAM_PIPE_INVALID;
+		}
+		if ((dev->props.restricted_pipes & (1UL << pipe_index))) {
+			SPS_ERR(dev, "sps:BAM %pa pipe %d is not local\n",
+				BAM_ID(dev), pipe_index);
+			return SPS_BAM_PIPE_INVALID;
+		}
+		if (dev->pipes[pipe_index] != NULL) {
+			SPS_ERR(dev,
+				"sps:Pipe %d already allocated on BAM %pa\n",
+				pipe_index, BAM_ID(dev));
+			return SPS_BAM_PIPE_INVALID;
+		}
+	}
+
+	/* Mark pipe as allocated */
+	dev->pipes[pipe_index] = BAM_PIPE_UNASSIGNED;
+
+	return pipe_index;
+}
+
+/**
+ * Free a BAM pipe
+ *
+ */
+void sps_bam_pipe_free(struct sps_bam *dev, u32 pipe_index)
+{
+	struct sps_pipe *pipe;
+
+	if (pipe_index >= dev->props.num_pipes) {
+		SPS_ERR(dev, "sps:Invalid BAM %pa pipe: %d\n", BAM_ID(dev),
+				pipe_index);
+		return;
+	}
+
+	/* Get the client pipe struct and mark the pipe free */
+	pipe = dev->pipes[pipe_index];
+	dev->pipes[pipe_index] = NULL;
+
+	/* Is the pipe currently allocated? */
+	if (pipe == NULL) {
+		SPS_ERR(dev,
+			"sps:Attempt to free unallocated pipe %d on BAM %pa\n",
+			pipe_index, BAM_ID(dev));
+		return;
+	}
+
+	if (pipe == BAM_PIPE_UNASSIGNED)
+		return;		/* Never assigned, so no work to do */
+
+	/* Return pending items to appropriate pools */
+	if (!list_empty(&pipe->sys.events_q)) {
+		struct sps_q_event *sps_event;
+
+		SPS_ERR(dev,
+			"sps:Disconnect BAM %pa pipe %d with events pending\n",
+			BAM_ID(dev), pipe_index);
+
+		sps_event = list_entry((&pipe->sys.events_q)->next,
+				typeof(*sps_event), list);
+
+		while (&sps_event->list != (&pipe->sys.events_q)) {
+			struct sps_q_event *sps_event_delete = sps_event;
+
+			list_del(&sps_event->list);
+			sps_event = list_entry(sps_event->list.next,
+					typeof(*sps_event), list);
+			kfree(sps_event_delete);
+		}
+	}
+
+	/* Clear the BAM pipe state struct */
+	pipe_clear(pipe);
+}
+
+/**
+ * Establish BAM pipe connection
+ *
+ */
+int sps_bam_pipe_connect(struct sps_pipe *bam_pipe,
+			 const struct sps_bam_connect_param *params)
+{
+	struct bam_pipe_parameters hw_params;
+	struct sps_bam *dev;
+	const struct sps_connection *map = bam_pipe->map;
+	const struct sps_conn_end_pt *map_pipe;
+	const struct sps_conn_end_pt *other_pipe;
+	void *desc_buf = NULL;
+	u32 pipe_index;
+	int result;
+
+	/* Clear the client pipe state and hw init struct */
+	pipe_clear(bam_pipe);
+	memset(&hw_params, 0, sizeof(hw_params));
+
+	/* Initialize the BAM state struct */
+	bam_pipe->mode = params->mode;
+
+	/* Set pipe streaming mode */
+	if ((params->options & SPS_O_STREAMING) == 0)
+		hw_params.stream_mode = BAM_STREAM_MODE_DISABLE;
+	else
+		hw_params.stream_mode = BAM_STREAM_MODE_ENABLE;
+
+	/* Determine which end point to connect */
+	if (bam_pipe->mode == SPS_MODE_SRC) {
+		map_pipe = &map->src;
+		other_pipe = &map->dest;
+		hw_params.dir = BAM_PIPE_PRODUCER;
+	} else {
+		map_pipe = &map->dest;
+		other_pipe = &map->src;
+		hw_params.dir = BAM_PIPE_CONSUMER;
+	}
+
+	/* Process map parameters */
+	dev = map_pipe->bam;
+	pipe_index = map_pipe->pipe_index;
+
+	SPS_DBG2(dev,
+		"sps:BAM %pa; pipe %d; mode:%d; options:0x%x.\n",
+		BAM_ID(dev), pipe_index, params->mode, params->options);
+
+	if (pipe_index >= dev->props.num_pipes) {
+		SPS_ERR(dev, "sps:Invalid BAM %pa pipe: %d\n", BAM_ID(dev),
+				pipe_index);
+		return SPS_ERROR;
+	}
+	hw_params.event_threshold = (u16) map_pipe->event_threshold;
+	hw_params.ee = dev->props.ee;
+	hw_params.lock_group = map_pipe->lock_group;
+
+	/* Verify that control of this pipe is allowed */
+	if ((dev->props.manage & SPS_BAM_MGR_PIPE_NO_CTRL) ||
+	    (dev->props.restricted_pipes & (1UL << pipe_index))) {
+		SPS_ERR(dev, "sps:BAM %pa pipe %d is not local\n",
+			BAM_ID(dev), pipe_index);
+		return SPS_ERROR;
+	}
+
+	/* Control without configuration permission is not supported yet */
+	if ((dev->props.manage & SPS_BAM_MGR_PIPE_NO_CONFIG)) {
+		SPS_ERR(dev,
+			"sps:BAM %pa pipe %d remote config is not supported\n",
+			BAM_ID(dev), pipe_index);
+		return SPS_ERROR;
+	}
+
+	/* Determine operational mode */
+	if (other_pipe->bam != NULL) {
+		unsigned long iova;
+		struct sps_bam *peer_bam = (struct sps_bam *)(other_pipe->bam);
+		/* BAM-to-BAM mode */
+		bam_pipe->state |= BAM_STATE_BAM2BAM;
+		hw_params.mode = BAM_PIPE_MODE_BAM2BAM;
+
+		if (dev->props.options & SPS_BAM_SMMU_EN) {
+			if (bam_pipe->mode == SPS_MODE_SRC)
+				iova = bam_pipe->connect.dest_iova;
+			else
+				iova = bam_pipe->connect.source_iova;
+			SPS_DBG2(dev,
+				"sps:BAM %pa pipe %d uses IOVA 0x%lx.\n",
+				 BAM_ID(dev), pipe_index, iova);
+			hw_params.peer_phys_addr = (u32)iova;
+		} else {
+			hw_params.peer_phys_addr = peer_bam->props.phys_addr;
+		}
+
+		hw_params.peer_pipe = other_pipe->pipe_index;
+
+		/* Verify FIFO buffers are allocated for BAM-to-BAM pipes */
+		if (map->desc.phys_base == SPS_ADDR_INVALID ||
+		    map->data.phys_base == SPS_ADDR_INVALID ||
+		    map->desc.size == 0 || map->data.size == 0) {
+			SPS_ERR(dev,
+				"sps:FIFO buffers are not allocated for BAM %pa pipe %d.\n",
+				BAM_ID(dev), pipe_index);
+			return SPS_ERROR;
+		}
+
+		if (dev->props.options & SPS_BAM_SMMU_EN) {
+			hw_params.data_base =
+				(phys_addr_t)bam_pipe->connect.data.iova;
+			SPS_DBG2(dev,
+				"sps:BAM %pa pipe %d uses IOVA 0x%lx for data FIFO.\n",
+				 BAM_ID(dev), pipe_index,
+				 bam_pipe->connect.data.iova);
+		} else {
+			hw_params.data_base = map->data.phys_base;
+		}
+
+		hw_params.data_size = map->data.size;
+
+		/* Clear the data FIFO for debug */
+		if (map->data.base != NULL && bam_pipe->mode == SPS_MODE_SRC)
+			memset_io(map->data.base, 0, hw_params.data_size);
+
+		/* set NWD bit for BAM2BAM producer pipe */
+		if (bam_pipe->mode == SPS_MODE_SRC) {
+			if ((params->options & SPS_O_WRITE_NWD) == 0)
+				hw_params.write_nwd = BAM_WRITE_NWD_DISABLE;
+			else
+				hw_params.write_nwd = BAM_WRITE_NWD_ENABLE;
+		}
+	} else {
+		/* System mode */
+		hw_params.mode = BAM_PIPE_MODE_SYSTEM;
+		bam_pipe->sys.desc_buf = map->desc.base;
+		bam_pipe->sys.desc_offset = 0;
+		bam_pipe->sys.acked_offset = 0;
+	}
+
+	/* Initialize the client pipe state */
+	bam_pipe->pipe_index = pipe_index;
+	bam_pipe->pipe_index_mask = 1UL << pipe_index;
+
+	/* Get virtual address for descriptor FIFO */
+	if (map->desc.phys_base != SPS_ADDR_INVALID) {
+		if (map->desc.size < (2 * sizeof(struct sps_iovec))) {
+			SPS_ERR(dev,
+				"sps:Invalid descriptor FIFO size for BAM %pa pipe %d: %d\n",
+				BAM_ID(dev), pipe_index, map->desc.size);
+			return SPS_ERROR;
+		}
+		desc_buf = map->desc.base;
+
+		/*
+		 * Note that descriptor base and size will be left zero from
+		 * the memset() above if the physical address was invalid.
+		 * This allows a satellite driver to set the FIFO as
+		 * local memory for system mode.
+		 */
+
+		if (dev->props.options & SPS_BAM_SMMU_EN) {
+			hw_params.desc_base =
+				(phys_addr_t)bam_pipe->connect.desc.iova;
+			SPS_DBG2(dev,
+				"sps:BAM %pa pipe %d uses IOVA 0x%lx for desc FIFO.\n",
+				 BAM_ID(dev), pipe_index,
+				 bam_pipe->connect.desc.iova);
+		} else {
+			hw_params.desc_base = map->desc.phys_base;
+		}
+
+		hw_params.desc_size = map->desc.size;
+	}
+
+	/* Configure the descriptor FIFO for both operational modes */
+	if (desc_buf != NULL)
+		if (bam_pipe->mode == SPS_MODE_SRC ||
+		    hw_params.mode == BAM_PIPE_MODE_SYSTEM)
+			memset_io(desc_buf, 0, hw_params.desc_size);
+
+	bam_pipe->desc_size = hw_params.desc_size;
+	bam_pipe->num_descs = bam_pipe->desc_size / sizeof(struct sps_iovec);
+
+	result = SPS_ERROR;
+	/* Ensure that the BAM is enabled */
+	if ((dev->state & BAM_STATE_ENABLED) == 0)
+		if (sps_bam_enable(dev))
+			goto exit_init_err;
+
+	/* Check pipe allocation */
+	if (dev->pipes[pipe_index] != BAM_PIPE_UNASSIGNED) {
+		SPS_ERR(dev, "sps:Invalid pipe %d on BAM %pa for connect\n",
+			pipe_index, BAM_ID(dev));
+		return SPS_ERROR;
+	}
+
+	if (bam_pipe_is_enabled(&dev->base, pipe_index)) {
+		if (params->options & SPS_O_NO_DISABLE)
+			SPS_DBG2(dev,
+				"sps:BAM %pa pipe %d is already enabled.\n",
+				BAM_ID(dev), pipe_index);
+		else {
+			SPS_ERR(dev, "sps:BAM %pa pipe %d sharing violation\n",
+				BAM_ID(dev), pipe_index);
+			return SPS_ERROR;
+		}
+	}
+
+	if (bam_pipe_init(&dev->base, pipe_index, &hw_params, dev->props.ee)) {
+		SPS_ERR(dev, "sps:BAM %pa pipe %d init error\n",
+			BAM_ID(dev), pipe_index);
+		goto exit_err;
+	}
+
+	/* Assign pipe to client */
+	dev->pipes[pipe_index] = bam_pipe;
+
+	/* Process configuration parameters */
+	if (params->options != 0 ||
+	    (bam_pipe->state & BAM_STATE_BAM2BAM) == 0) {
+		/* Process init-time only parameters */
+		u32 irq_gen_addr;
+
+		/* Set interrupt mode */
+		irq_gen_addr = SPS_ADDR_INVALID;
+		if ((params->options & SPS_O_IRQ_MTI))
+			/* Client has directly specified the MTI address */
+			irq_gen_addr = params->irq_gen_addr;
+		else if ((dev->state & BAM_STATE_MTI))
+			/* This BAM has MTI use enabled */
+			irq_gen_addr = dev->props.irq_gen_addr;
+
+		if (irq_gen_addr != SPS_ADDR_INVALID) {
+			/*
+			 * No checks - assume BAM is already setup for
+			 * MTI generation,
+			 * or the pipe will be set to satellite control.
+			 */
+			bam_pipe->state |= BAM_STATE_MTI;
+			bam_pipe->irq_gen_addr = irq_gen_addr;
+		}
+
+		/* Process runtime parameters */
+		if (sps_bam_pipe_set_params(dev, pipe_index,
+					  params->options)) {
+			dev->pipes[pipe_index] = BAM_PIPE_UNASSIGNED;
+			goto exit_err;
+		}
+	}
+
+	/* Indicate initialization is complete */
+	dev->pipes[pipe_index] = bam_pipe;
+	dev->pipe_active_mask |= 1UL << pipe_index;
+	list_add_tail(&bam_pipe->list, &dev->pipes_q);
+
+	SPS_DBG2(dev,
+		"sps:BAM %pa; pipe %d; pipe_index_mask:0x%x; pipe_active_mask:0x%x.\n",
+		BAM_ID(dev), pipe_index,
+		bam_pipe->pipe_index_mask, dev->pipe_active_mask);
+
+	bam_pipe->state |= BAM_STATE_INIT;
+	result = 0;
+exit_err:
+	if (result) {
+		if (params->options & SPS_O_NO_DISABLE)
+			SPS_DBG2(dev, "sps:BAM %pa pipe %d connection exits\n",
+				BAM_ID(dev), pipe_index);
+		else
+			bam_pipe_exit(&dev->base, pipe_index, dev->props.ee);
+	}
+exit_init_err:
+	if (result) {
+		/* Clear the client pipe state */
+		pipe_clear(bam_pipe);
+	}
+
+	return result;
+}
+
+/**
+ * Disconnect a BAM pipe connection
+ *
+ */
+int sps_bam_pipe_disconnect(struct sps_bam *dev, u32 pipe_index)
+{
+	struct sps_pipe *pipe;
+	int result;
+	unsigned long flags;
+
+	if (pipe_index >= dev->props.num_pipes) {
+		SPS_ERR(dev, "sps:Invalid BAM %pa pipe: %d\n", BAM_ID(dev),
+				pipe_index);
+		return SPS_ERROR;
+	}
+
+	/* Deallocate and reset the BAM pipe */
+	pipe = dev->pipes[pipe_index];
+	if (BAM_PIPE_IS_ASSIGNED(pipe)) {
+		if ((dev->pipe_active_mask & (1UL << pipe_index))) {
+			spin_lock_irqsave(&dev->isr_lock, flags);
+			list_del(&pipe->list);
+			dev->pipe_active_mask &= ~(1UL << pipe_index);
+			spin_unlock_irqrestore(&dev->isr_lock, flags);
+		}
+		dev->pipe_remote_mask &= ~(1UL << pipe_index);
+		if (pipe->connect.options & SPS_O_NO_DISABLE)
+			SPS_DBG2(dev, "sps:BAM %pa pipe %d exits.\n",
+				BAM_ID(dev), pipe_index);
+		else
+			bam_pipe_exit(&dev->base, pipe_index, dev->props.ee);
+		if (pipe->sys.desc_cache != NULL) {
+			u32 size = pipe->num_descs * sizeof(void *);
+
+			if (pipe->desc_size + size <= PAGE_SIZE) {
+				if (dev->props.options & SPS_BAM_HOLD_MEM)
+					memset(pipe->sys.desc_cache, 0,
+						pipe->desc_size + size);
+				else
+					kfree(pipe->sys.desc_cache);
+			} else {
+				vfree(pipe->sys.desc_cache);
+			}
+			pipe->sys.desc_cache = NULL;
+		}
+		dev->pipes[pipe_index] = BAM_PIPE_UNASSIGNED;
+		pipe_clear(pipe);
+		result = 0;
+	} else {
+		result = SPS_ERROR;
+	}
+
+	if (result)
+		SPS_ERR(dev, "sps:BAM %pa pipe %d already disconnected\n",
+			BAM_ID(dev), pipe_index);
+
+	return result;
+}
+
+/**
+ * Set BAM pipe interrupt enable state
+ *
+ * This function sets the interrupt enable state for a BAM pipe.
+ *
+ * @dev - pointer to BAM device descriptor
+ *
+ * @pipe_index - pipe index
+ *
+ * @poll - true if SPS_O_POLL is set, false otherwise
+ *
+ */
+static void pipe_set_irq(struct sps_bam *dev, u32 pipe_index,
+				 u32 poll)
+{
+	struct sps_pipe *pipe = dev->pipes[pipe_index];
+	enum bam_enable irq_enable;
+
+	SPS_DBG2(dev,
+		"sps:BAM:%pa; pipe %d; poll:%d, irq_mask:0x%x; pipe state:0x%x; dev state:0x%x.\n",
+		BAM_ID(dev), pipe_index, poll, pipe->irq_mask,
+		pipe->state, dev->state);
+
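+	/*
+	 * Arm the pipe interrupt only if the client did not ask for polling,
+	 * at least one event source is enabled in irq_mask, and the BAM
+	 * itself has a registered IRQ; otherwise fall back to polled mode.
+	 */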
+	if (poll == 0 && pipe->irq_mask != 0 &&
+	    (dev->state & BAM_STATE_IRQ)) {
+		if ((pipe->state & BAM_STATE_BAM2BAM) != 0 &&
+		    (pipe->state & BAM_STATE_IRQ) == 0) {
+			/*
+			 * If enabling the interrupt for a BAM-to-BAM pipe,
+			 * clear the existing interrupt status
+			 */
+			(void)bam_pipe_get_and_clear_irq_status(&dev->base,
+							   pipe_index);
+		}
+		pipe->state |= BAM_STATE_IRQ;
+		irq_enable = BAM_ENABLE;
+		pipe->polled = false;
+	} else {
+		pipe->state &= ~BAM_STATE_IRQ;
+		irq_enable = BAM_DISABLE;
+		pipe->polled = true;
+		if (poll == 0 && pipe->irq_mask)
+			SPS_DBG2(dev,
+				"sps:BAM %pa pipe %d forced to use polling\n",
+				 BAM_ID(dev), pipe_index);
+	}
+	if ((pipe->state & BAM_STATE_MTI) == 0)
+		bam_pipe_set_irq(&dev->base, pipe_index, irq_enable,
+					 pipe->irq_mask, dev->props.ee);
+	else
+		bam_pipe_set_mti(&dev->base, pipe_index, irq_enable,
+					 pipe->irq_mask, pipe->irq_gen_addr);
+
+}
+
+/**
+ * Set BAM pipe parameters
+ *
+ */
+int sps_bam_pipe_set_params(struct sps_bam *dev, u32 pipe_index, u32 options)
+{
+	struct sps_pipe *pipe = dev->pipes[pipe_index];
+	u32 mask;
+	int wake_up_is_one_shot;
+	int no_queue;
+	int ack_xfers;
+	u32 size;
+	int n;
+
+	SPS_DBG2(dev, "sps:BAM %pa pipe %d opt 0x%x\n",
+		BAM_ID(dev), pipe_index, options);
+
+	/* Capture some options */
+	wake_up_is_one_shot = ((options & SPS_O_WAKEUP_IS_ONESHOT));
+	no_queue = ((options & SPS_O_NO_Q));
+	ack_xfers = ((options & SPS_O_ACK_TRANSFERS));
+
+	pipe->hybrid = options & SPS_O_HYBRID;
+	pipe->late_eot = options & SPS_O_LATE_EOT;
+
+	/* Create interrupt source mask */
+	mask = 0;
+	for (n = 0; n < ARRAY_SIZE(opt_event_table); n++) {
+		/* Is client registering for this event? */
+		if ((options & opt_event_table[n].option) == 0)
+			continue;	/* No */
+
+		mask |= opt_event_table[n].pipe_irq;
+	}
+
+#ifdef SPS_BAM_STATISTICS
+	/* Is an illegal mode change specified? */
+	if (pipe->sys.desc_wr_count > 0 &&
+	    (no_queue != pipe->sys.no_queue
+	     || ack_xfers != pipe->sys.ack_xfers)) {
+		SPS_ERR(dev,
+			"sps:Queue/ack mode change after transfer: BAM %pa pipe %d opt 0x%x\n",
+			BAM_ID(dev), pipe_index, options);
+		return SPS_ERROR;
+	}
+#endif /* SPS_BAM_STATISTICS */
+
+	/* Is client setting invalid options for a BAM-to-BAM connection? */
+	if ((pipe->state & BAM_STATE_BAM2BAM) &&
+	    (options & BAM2BAM_O_INVALID)) {
+		SPS_ERR(dev,
+			"sps:Invalid option for BAM-to-BAM: BAM %pa pipe %d opt 0x%x\n",
+			BAM_ID(dev), pipe_index, options);
+		return SPS_ERROR;
+	}
+
+	/* Allocate descriptor FIFO cache if NO_Q option is disabled */
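+	/*
+	 * With SPS_BAM_HOLD_MEM, small (<= PAGE_SIZE) caches are allocated
+	 * once per pipe, parked in dev->desc_cache_pointers across
+	 * disconnects, and zeroed rather than freed on teardown.
+	 */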
+	if (!no_queue && pipe->sys.desc_cache == NULL && pipe->num_descs > 0
+	    && (pipe->state & BAM_STATE_BAM2BAM) == 0) {
+		/* Allocate both descriptor cache and user pointer array */
+		size = pipe->num_descs * sizeof(void *);
+
+		if (pipe->desc_size + size <= PAGE_SIZE) {
+			if ((dev->props.options &
+						SPS_BAM_HOLD_MEM)) {
+				if (dev->desc_cache_pointers[pipe_index]) {
+					pipe->sys.desc_cache =
+						dev->desc_cache_pointers
+							[pipe_index];
+				} else {
+					pipe->sys.desc_cache =
+						kzalloc(pipe->desc_size + size,
+								GFP_KERNEL);
+					dev->desc_cache_pointers[pipe_index] =
+							pipe->sys.desc_cache;
+				}
+			} else {
+				pipe->sys.desc_cache =
+						kzalloc(pipe->desc_size + size,
+							GFP_KERNEL);
+			}
+			if (pipe->sys.desc_cache == NULL) {
+				SPS_ERR(dev,
+					"sps:No memory for pipe%d of BAM %pa\n",
+						pipe_index, BAM_ID(dev));
+				return -ENOMEM;
+			}
+		} else {
+			pipe->sys.desc_cache =
+				vmalloc(pipe->desc_size + size);
+
+			if (pipe->sys.desc_cache == NULL) {
+				SPS_ERR(dev,
+					"sps:No memory for pipe %d of BAM %pa\n",
+					pipe_index, BAM_ID(dev));
+				return -ENOMEM;
+			}
+
+			memset(pipe->sys.desc_cache, 0, pipe->desc_size + size);
+		}
+
+		if (pipe->sys.desc_cache == NULL) {
+			/* MUST BE LAST POINT OF FAILURE (see below) */
+			SPS_ERR(dev,
+				"sps:Desc cache error: BAM %pa pipe %d: %d\n",
+				BAM_ID(dev), pipe_index,
+				pipe->desc_size + size);
+			return SPS_ERROR;
+		}
+		pipe->sys.user_ptrs = (void **)(pipe->sys.desc_cache +
+						 pipe->desc_size);
+		pipe->sys.cache_offset = pipe->sys.acked_offset;
+	}
+
+	/*
+	 * No failures beyond this point. The descriptor cache allocation
+	 * above is the last point of failure, so no unwind handling is
+	 * needed.
+	 */
+
+	/* Enable/disable the pipe's interrupt sources */
+	pipe->irq_mask = mask;
+	pipe_set_irq(dev, pipe_index, (options & SPS_O_POLL));
+
+	/* Store software feature enables */
+	pipe->wake_up_is_one_shot = wake_up_is_one_shot;
+	pipe->sys.no_queue = no_queue;
+	pipe->sys.ack_xfers = ack_xfers;
+
+	return 0;
+}
+
+/**
+ * Enable a BAM pipe
+ *
+ */
+int sps_bam_pipe_enable(struct sps_bam *dev, u32 pipe_index)
+{
+	struct sps_pipe *pipe = dev->pipes[pipe_index];
+
+	/* Enable the BAM pipe */
+	bam_pipe_enable(&dev->base, pipe_index);
+	pipe->state |= BAM_STATE_ENABLED;
+
+	return 0;
+}
+
+/**
+ * Disable a BAM pipe
+ *
+ */
+int sps_bam_pipe_disable(struct sps_bam *dev, u32 pipe_index)
+{
+	struct sps_pipe *pipe = dev->pipes[pipe_index];
+
+	/* Disable the BAM pipe */
+	if (pipe->connect.options & SPS_O_NO_DISABLE)
+		SPS_DBG2(dev, "sps:BAM %pa pipe %d enters disable state\n",
+			BAM_ID(dev), pipe_index);
+	else
+		bam_pipe_disable(&dev->base, pipe_index);
+
+	pipe->state &= ~BAM_STATE_ENABLED;
+
+	return 0;
+}
+
+/**
+ * Register an event for a BAM pipe
+ *
+ */
+int sps_bam_pipe_reg_event(struct sps_bam *dev,
+			   u32 pipe_index,
+			   struct sps_register_event *reg)
+{
+	struct sps_pipe *pipe = dev->pipes[pipe_index];
+	struct sps_bam_event_reg *event_reg;
+	int n;
+
+	if (pipe->sys.no_queue && reg->xfer_done != NULL &&
+	    reg->mode != SPS_TRIGGER_CALLBACK) {
+		SPS_ERR(dev,
+			"sps:Only callback events are supported for NO_Q: BAM %pa pipe %d mode %d\n",
+			BAM_ID(dev), pipe_index, reg->mode);
+		return SPS_ERROR;
+	}
+
+	for (n = 0; n < ARRAY_SIZE(opt_event_table); n++) {
+		int index;
+
+		/* Is client registering for this event? */
+		if ((reg->options & opt_event_table[n].option) == 0)
+			continue;	/* No */
+
+		index = SPS_EVENT_INDEX(opt_event_table[n].event_id);
+		if (index < 0)
+			SPS_ERR(dev,
+				"sps:Negative event index: BAM %pa pipe %d mode %d\n",
+				BAM_ID(dev), pipe_index, reg->mode);
+		else {
+			event_reg = &pipe->sys.event_regs[index];
+			event_reg->xfer_done = reg->xfer_done;
+			event_reg->callback = reg->callback;
+			event_reg->mode = reg->mode;
+			event_reg->user = reg->user;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * Submit a transfer of a single buffer to a BAM pipe
+ *
+ */
+int sps_bam_pipe_transfer_one(struct sps_bam *dev,
+				    u32 pipe_index, u32 addr, u32 size,
+				    void *user, u32 flags)
+{
+	struct sps_pipe *pipe = dev->pipes[pipe_index];
+	struct sps_iovec *desc;
+	struct sps_iovec iovec;
+	u32 next_write;
+	static int show_recom;
+
+	SPS_DBG(dev, "sps:BAM %pa pipe %d addr 0x%x size 0x%x flags 0x%x\n",
+			BAM_ID(dev), pipe_index, addr, size, flags);
+
+	/* Is this a BAM-to-BAM or satellite connection? */
+	if ((pipe->state & (BAM_STATE_BAM2BAM | BAM_STATE_REMOTE))) {
+		SPS_ERR(dev, "sps:Transfer on BAM-to-BAM: BAM %pa pipe %d\n",
+			BAM_ID(dev), pipe_index);
+		return SPS_ERROR;
+	}
+
+	/*
+	 * Client identifier (user pointer) is not supported for
+	 * SPS_O_NO_Q option.
+	 */
+	if (pipe->sys.no_queue && user != NULL) {
+		SPS_ERR(dev, "sps:User pointer arg non-NULL: BAM %pa pipe %d\n",
+			BAM_ID(dev), pipe_index);
+		return SPS_ERROR;
+	}
+
+	/* Determine if descriptor can be queued */
+	next_write = pipe->sys.desc_offset + sizeof(struct sps_iovec);
+	if (next_write >= pipe->desc_size)
+		next_write = 0;
+
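+	/*
+	 * One slot is deliberately left unused: when the next write position
+	 * would land on the last acknowledged (read) position, the ring is
+	 * treated as full, which keeps a full FIFO distinguishable from an
+	 * empty one.
+	 */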
+	if (next_write == pipe->sys.acked_offset) {
+		/*
+		 * If pipe is polled and client is not ACK'ing descriptors,
+		 * perform polling operation so that any outstanding ACKs
+		 * can occur.
+		 */
+		if (!pipe->sys.ack_xfers && pipe->polled) {
+			pipe_handler_eot(dev, pipe);
+			if (next_write == pipe->sys.acked_offset) {
+				if (!show_recom) {
+					show_recom = true;
+					SPS_ERR(dev,
+						"sps:Client of BAM %pa pipe %d is recommended to have flow control\n",
+						BAM_ID(dev), pipe_index);
+				}
+
+				SPS_DBG1(dev,
+					"sps:Descriptor FIFO is full for BAM %pa pipe %d after pipe_handler_eot\n",
+					BAM_ID(dev), pipe_index);
+				return SPS_ERROR;
+			}
+		} else {
+			if (!show_recom) {
+				show_recom = true;
+				SPS_ERR(dev,
+					"sps:Client of BAM %pa pipe %d is recommended to have flow control.\n",
+					BAM_ID(dev), pipe_index);
+			}
+
+			SPS_DBG1(dev,
+				"sps:Descriptor FIFO is full for BAM %pa pipe %d\n",
+				BAM_ID(dev), pipe_index);
+			return SPS_ERROR;
+		}
+	}
+
+	/* Create descriptor */
+	if (!pipe->sys.no_queue)
+		desc = (struct sps_iovec *) (pipe->sys.desc_cache +
+					      pipe->sys.desc_offset);
+	else
+		desc = &iovec;
+
+	desc->addr = addr;
+	desc->size = size;
+
+	if ((flags & SPS_IOVEC_FLAG_DEFAULT) == 0) {
+		desc->flags = (flags & BAM_IOVEC_FLAG_MASK)
+				| DESC_UPPER_ADDR(flags);
+	} else {
+		if (pipe->mode == SPS_MODE_SRC)
+			desc->flags = SPS_IOVEC_FLAG_INT
+					| DESC_UPPER_ADDR(flags);
+		else
+			desc->flags = (SPS_IOVEC_FLAG_INT | SPS_IOVEC_FLAG_EOT)
+					| DESC_UPPER_ADDR(flags);
+	}
+
+#ifdef SPS_BAM_STATISTICS
+	if ((flags & SPS_IOVEC_FLAG_INT))
+		pipe->sys.int_flags++;
+	if ((flags & SPS_IOVEC_FLAG_EOT))
+		pipe->sys.eot_flags++;
+#endif /* SPS_BAM_STATISTICS */
+
+	/* Update hardware descriptor FIFO - should result in burst */
+	*((struct sps_iovec *) (pipe->sys.desc_buf + pipe->sys.desc_offset)) =
+		*desc;
+
+	/* Record user pointer value */
+	if (!pipe->sys.no_queue) {
+		u32 index = pipe->sys.desc_offset / sizeof(struct sps_iovec);
+
+		pipe->sys.user_ptrs[index] = user;
+#ifdef SPS_BAM_STATISTICS
+		if (user != NULL)
+			pipe->sys.user_ptrs_count++;
+#endif /* SPS_BAM_STATISTICS */
+	}
+
+	/* Advance the local descriptor write offset */
+	pipe->sys.desc_offset = next_write;
+
+#ifdef SPS_BAM_STATISTICS
+	/* Update statistics */
+	pipe->sys.desc_wr_count++;
+#endif /* SPS_BAM_STATISTICS */
+
+	/* Notify pipe */
+	if ((flags & SPS_IOVEC_FLAG_NO_SUBMIT) == 0) {
+		wmb(); /* Memory Barrier */
+		bam_pipe_set_desc_write_offset(&dev->base, pipe_index,
+					       next_write);
+	}
+
+	if (dev->ipc_loglevel == 0)
+		SPS_DBG(dev,
+			"sps:%s: BAM phy addr:%pa; pipe %d; write pointer to tell HW: 0x%x; write pointer read from HW: 0x%x\n",
+			__func__, BAM_ID(dev), pipe_index, next_write,
+			bam_pipe_get_desc_write_offset(&dev->base, pipe_index));
+
+	return 0;
+}
+
+/**
+ * Submit a transfer to a BAM pipe
+ *
+ */
+int sps_bam_pipe_transfer(struct sps_bam *dev,
+			 u32 pipe_index, struct sps_transfer *transfer)
+{
+	struct sps_iovec *iovec;
+	u32 count;
+	u32 flags;
+	void *user;
+	int n;
+	int result;
+	struct sps_pipe *pipe = dev->pipes[pipe_index];
+
+	if (transfer->iovec_count == 0) {
+		SPS_ERR(dev, "sps:iovec count zero: BAM %pa pipe %d\n",
+			BAM_ID(dev), pipe_index);
+		return SPS_ERROR;
+	}
+
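+	/*
+	 * For a polled pipe whose client does not ACK transfers, derive the
+	 * free count from the number of unused descriptors, reserving the one
+	 * slot that keeps the ring from looking empty when full; otherwise
+	 * use sps_bam_get_free_count() directly.
+	 */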
+	if (!pipe->sys.ack_xfers && pipe->polled) {
+		sps_bam_pipe_get_unused_desc_num(dev, pipe_index,
+					&count);
+		count = pipe->desc_size / sizeof(struct sps_iovec) - count - 1;
+	} else
+		sps_bam_get_free_count(dev, pipe_index, &count);
+
+	if (count < transfer->iovec_count) {
+		SPS_ERR(dev,
+			"sps:Insufficient free desc: BAM %pa pipe %d: %d\n",
+			BAM_ID(dev), pipe_index, count);
+		return SPS_ERROR;
+	}
+
+	user = NULL;		/* NULL for all except last descriptor */
+	for (n = (int)transfer->iovec_count - 1, iovec = transfer->iovec;
+	    n >= 0; n--, iovec++) {
+		if (n > 0) {
+			/* This is *not* the last descriptor */
+			flags = iovec->flags | SPS_IOVEC_FLAG_NO_SUBMIT;
+		} else {
+			/* This *is* the last descriptor */
+			flags = iovec->flags;
+			user = transfer->user;
+		}
+		result = sps_bam_pipe_transfer_one(dev, pipe_index,
+						 iovec->addr,
+						 iovec->size, user,
+						 flags);
+		if (result)
+			return SPS_ERROR;
+	}
+
+	return 0;
+}
+
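+/**
+ * Inject a zero-length transfer (ZLT)
+ *
+ * Writes a zero-length descriptor with only the EOT flag set at the pipe's
+ * current write pointer, forcing the hardware to signal EOT without moving
+ * any data.
+ */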
+int sps_bam_pipe_inject_zlt(struct sps_bam *dev, u32 pipe_index)
+{
+	struct sps_pipe *pipe = dev->pipes[pipe_index];
+	struct sps_iovec *desc;
+	u32 read_p, write_p, next_write;
+
+	if (pipe->state & BAM_STATE_BAM2BAM)
+		SPS_DBG2(dev, "sps: BAM-to-BAM pipe: BAM %pa pipe %d\n",
+			BAM_ID(dev), pipe_index);
+	else
+		SPS_DBG2(dev, "sps: BAM-to-System pipe: BAM %pa pipe %d\n",
+			BAM_ID(dev), pipe_index);
+
+	if (!(pipe->state & BAM_STATE_ENABLED)) {
+		SPS_ERR(dev,
+			"sps: BAM %pa pipe %d is not enabled.\n",
+			BAM_ID(dev), pipe_index);
+		return SPS_ERROR;
+	}
+
+	read_p = bam_pipe_get_desc_read_offset(&dev->base, pipe_index);
+	write_p = bam_pipe_get_desc_write_offset(&dev->base, pipe_index);
+
+	SPS_DBG2(dev,
+		"sps: BAM %pa pipe %d: read pointer:0x%x; write pointer:0x%x.\n",
+		BAM_ID(dev), pipe_index, read_p, write_p);
+
+	if (read_p == write_p) {
+		SPS_ERR(dev,
+			"sps: BAM %pa pipe %d: read pointer 0x%x is already equal to write pointer.\n",
+			BAM_ID(dev), pipe_index, read_p);
+		return SPS_ERROR;
+	}
+
+	next_write = write_p + sizeof(struct sps_iovec);
+	if (next_write >= pipe->desc_size) {
+		SPS_DBG2(dev,
+			"sps: BAM %pa pipe %d: next write is 0x%x: wrap around.\n",
+			BAM_ID(dev), pipe_index, next_write);
+		next_write = 0;
+	}
+
+	desc = (struct sps_iovec *) (pipe->connect.desc.base + write_p);
+	desc->addr = 0;
+	desc->size = 0;
+	desc->flags = SPS_IOVEC_FLAG_EOT;
+
+	wmb(); /* commit the descriptor before publishing the write pointer */
+	bam_pipe_set_desc_write_offset(&dev->base, pipe_index,
+					       next_write);
+	SPS_DBG2(dev,
+		"sps: BAM %pa pipe %d: write pointer to tell HW: 0x%x; write pointer read from HW: 0x%x\n",
+		BAM_ID(dev), pipe_index, next_write,
+		bam_pipe_get_desc_write_offset(&dev->base, pipe_index));
+
+	return 0;
+}
+
+/**
+ * Allocate an event tracking struct
+ *
+ * This function allocates an event tracking struct.
+ *
+ * @pipe - pointer to pipe state
+ *
+ * @event_reg - pointer to event registration
+ *
+ * @return - pointer to the pipe's embedded event notification struct
+ *
+ */
+static struct sps_q_event *alloc_event(struct sps_pipe *pipe,
+					struct sps_bam_event_reg *event_reg)
+{
+	struct sps_q_event *event;
+
+	/*
+	 * Despite the function name, no allocation occurs: the pipe embeds a
+	 * single event struct that is zeroed and reused for every
+	 * callback-mode notification.
+	 */
+	event = &pipe->sys.event;
+	memset(event, 0, sizeof(*event));
+
+	return event;
+}
+
+/**
+ * Trigger an event notification
+ *
+ * This function triggers an event notification.
+ *
+ * @dev - pointer to BAM device descriptor
+ *
+ * @pipe - pointer to pipe state
+ *
+ * @event_reg - pointer to event registration
+ *
+ * @sps_event - pointer to event struct
+ *
+ */
+static void trigger_event(struct sps_bam *dev,
+			  struct sps_pipe *pipe,
+			  struct sps_bam_event_reg *event_reg,
+			  struct sps_q_event *sps_event)
+{
+	if (sps_event == NULL) {
+		SPS_DBG1(dev, "%s", "sps:trigger_event.sps_event is NULL.\n");
+		return;
+	}
+
+	if (event_reg->xfer_done) {
+		complete(event_reg->xfer_done);
+		SPS_DBG(dev, "sps:trigger_event.done=%d.\n",
+			event_reg->xfer_done->done);
+	}
+
+	if (event_reg->callback) {
+		SPS_DBG(dev, "%s", "sps:trigger_event.using callback.\n");
+		event_reg->callback(&sps_event->notify);
+	}
+
+}
+
+/**
+ * Handle a BAM pipe's generic interrupt sources
+ *
+ * This function creates the event notification for a BAM pipe's
+ *    generic interrupt sources.  The caller of this function must lock the BAM
+ *    device's mutex.
+ *
+ * @dev - pointer to BAM device descriptor
+ *
+ * @pipe - pointer to pipe state
+ *
+ * @event_id - event identifier enum
+ *
+ */
+static void pipe_handler_generic(struct sps_bam *dev,
+			       struct sps_pipe *pipe,
+			       enum sps_event event_id)
+{
+	struct sps_bam_event_reg *event_reg;
+	struct sps_q_event *sps_event;
+	int index;
+
+	index = SPS_EVENT_INDEX(event_id);
+	if (index < 0 || index >= SPS_EVENT_INDEX(SPS_EVENT_MAX))
+		return;
+
+	event_reg = &pipe->sys.event_regs[index];
+	sps_event = alloc_event(pipe, event_reg);
+	if (sps_event != NULL) {
+		sps_event->notify.event_id = event_id;
+		sps_event->notify.user = event_reg->user;
+		trigger_event(dev, pipe, event_reg, sps_event);
+	}
+}
+
+/**
+ * Handle a BAM pipe's WAKEUP interrupt sources
+ *
+ * This function creates the event notification for a BAM pipe's
+ *    WAKEUP interrupt source.  The caller of this function must lock the BAM
+ *    device's mutex.
+ *
+ * @dev - pointer to BAM device descriptor
+ *
+ * @pipe - pointer to pipe state
+ *
+ */
+static void pipe_handler_wakeup(struct sps_bam *dev, struct sps_pipe *pipe)
+{
+	struct sps_bam_event_reg *event_reg;
+	struct sps_q_event *event;
+	u32 pipe_index = pipe->pipe_index;
+
+	if (pipe->wake_up_is_one_shot) {
+		SPS_DBG2(dev,
+			"sps:BAM:%pa pipe %d wake_up_is_one_shot; irq_mask:0x%x.\n",
+			BAM_ID(dev), pipe_index, pipe->irq_mask);
+		/* Disable the pipe WAKEUP interrupt source */
+		pipe->irq_mask &= ~BAM_PIPE_IRQ_WAKE;
+		pipe_set_irq(dev, pipe_index, pipe->polled);
+	}
+
+	event_reg = &pipe->sys.event_regs[SPS_EVENT_INDEX(SPS_EVENT_WAKEUP)];
+	event = alloc_event(pipe, event_reg);
+	if (event != NULL) {
+		event->notify.event_id = SPS_EVENT_WAKEUP;
+		event->notify.user = event_reg->user;
+		trigger_event(dev, pipe, event_reg, event);
+	}
+}
+
+/**
+ * Handle a BAM pipe's EOT/INT interrupt sources
+ *
+ * This function creates the event notification for a BAM pipe's EOT interrupt
+ *    source.  The caller of this function must lock the BAM device's mutex.
+ *
+ * @dev - pointer to BAM device descriptor
+ *
+ * @pipe - pointer to pipe state
+ *
+ */
+static void pipe_handler_eot(struct sps_bam *dev, struct sps_pipe *pipe)
+{
+	struct sps_bam_event_reg *event_reg;
+	struct sps_q_event *event;
+	struct sps_iovec *desc;
+	struct sps_iovec *cache;
+	void **user;
+	u32 *update_offset;
+	u32 pipe_index = pipe->pipe_index;
+	u32 offset;
+	u32 end_offset;
+	enum sps_event event_id;
+	u32 flags;
+	u32 enabled;
+	int producer = (pipe->mode == SPS_MODE_SRC);
+
+	if (pipe->sys.handler_eot) {
+		/*
+		 * This can happen if the pipe is configured for polling
+		 * (IRQ disabled) and callback event generation.
+		 * The client may perform a get_iovec() inside the callback.
+		 */
+		SPS_DBG(dev,
+			"sps:%s; still handling EOT for pipe %d.\n",
+			__func__, pipe->pipe_index);
+		return;
+	}
+
+	pipe->sys.handler_eot = true;
+
+	/* Get offset of last descriptor completed by the pipe */
+	end_offset = bam_pipe_get_desc_read_offset(&dev->base, pipe_index);
+
+	if (dev->ipc_loglevel == 0)
+		SPS_DBG(dev,
+			"sps:%s; pipe index:%d; read pointer:0x%x; write pointer:0x%x; sys.acked_offset:0x%x.\n",
+			__func__, pipe->pipe_index, end_offset,
+			bam_pipe_get_desc_write_offset(&dev->base, pipe_index),
+			pipe->sys.acked_offset);
+
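+	/*
+	 * With SPS_O_LATE_EOT, if the most recently completed descriptor does
+	 * not carry the EOT flag, back the end offset up one descriptor so
+	 * that completion is reported only up to the last EOT boundary and
+	 * the trailing descriptor is handled on a later pass.
+	 */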
+	if (producer && pipe->late_eot) {
+		struct sps_iovec *desc_end;
+
+		if (end_offset == 0)
+			desc_end = (struct sps_iovec *)(pipe->sys.desc_buf
+				+ pipe->desc_size - sizeof(struct sps_iovec));
+		else
+			desc_end = (struct sps_iovec *)	(pipe->sys.desc_buf
+				+ end_offset - sizeof(struct sps_iovec));
+
+		if (!(desc_end->flags & SPS_IOVEC_FLAG_EOT)) {
+			if (end_offset == 0)
+				end_offset = pipe->desc_size
+					- sizeof(struct sps_iovec);
+			else
+				end_offset -= sizeof(struct sps_iovec);
+		}
+	}
+
+	/* If no queue, then do not generate any events */
+	if (pipe->sys.no_queue) {
+		if (!pipe->sys.ack_xfers) {
+			/* Client is not ACK'ing transfers, so do it now */
+			pipe->sys.acked_offset = end_offset;
+		}
+		pipe->sys.handler_eot = false;
+		SPS_DBG(dev,
+			"sps:%s; pipe %d has no queue.\n",
+			__func__, pipe->pipe_index);
+		return;
+	}
+
+	/*
+	 * Get offset of last descriptor processed by software,
+	 * and update to the last descriptor completed by the pipe
+	 */
+	if (!pipe->sys.ack_xfers) {
+		update_offset = &pipe->sys.acked_offset;
+		offset = *update_offset;
+	} else {
+		update_offset = &pipe->sys.cache_offset;
+		offset = *update_offset;
+	}
+
+	/* Are there any completed descriptors to process? */
+	if (offset == end_offset) {
+		pipe->sys.handler_eot = false;
+		SPS_DBG(dev,
+			"sps:%s; there is no completed desc to process for pipe %d.\n",
+			__func__, pipe->pipe_index);
+		return;
+	}
+
+	/* Determine enabled events */
+	enabled = 0;
+	if ((pipe->irq_mask & SPS_O_EOT))
+		enabled |= SPS_IOVEC_FLAG_EOT;
+
+	if ((pipe->irq_mask & SPS_O_DESC_DONE))
+		enabled |= SPS_IOVEC_FLAG_INT;
+
+	/*
+	 * For producer pipe, update the cached descriptor byte count and flags.
+	 * For consumer pipe, the BAM does not update the descriptors, so just
+	 * use the cached copies.
+	 */
+	if (producer) {
+		/*
+		 * Do copies in a tight loop to increase chance of
+		 * multi-descriptor burst accesses on the bus
+		 */
+		struct sps_iovec *desc_end;
+
+		/* Set starting point for copy */
+		desc = (struct sps_iovec *) (pipe->sys.desc_buf + offset);
+		cache =	(struct sps_iovec *) (pipe->sys.desc_cache + offset);
+
+		/* Fetch all completed descriptors to end of FIFO (wrap) */
+		if (end_offset < offset) {
+			desc_end = (struct sps_iovec *)
+				   (pipe->sys.desc_buf + pipe->desc_size);
+			while (desc < desc_end)
+				*cache++ = *desc++;
+
+			desc = (void *)pipe->sys.desc_buf;
+			cache = (void *)pipe->sys.desc_cache;
+		}
+
+		/* Fetch all remaining completed descriptors (no wrap) */
+		desc_end = (struct sps_iovec *)	(pipe->sys.desc_buf +
+						 end_offset);
+		while (desc < desc_end)
+			*cache++ = *desc++;
+	}
+
+	/* Process all completed descriptors */
+	cache = (struct sps_iovec *) (pipe->sys.desc_cache + offset);
+	user = &pipe->sys.user_ptrs[offset / sizeof(struct sps_iovec)];
+	for (;;) {
+		SPS_DBG(dev,
+			"sps:%s; pipe index:%d; iovec addr:0x%x; size:0x%x; flags:0x%x; enabled:0x%x; *user is %s NULL.\n",
+			__func__, pipe->pipe_index, cache->addr,
+			cache->size, cache->flags, enabled,
+			(*user == NULL) ? "" : "not");
+
+		/*
+		 * Increment offset to next descriptor and update pipe offset
+		 * so a client callback can fetch the I/O vector.
+		 */
+		offset += sizeof(struct sps_iovec);
+		if (offset >= pipe->desc_size)
+			/* Roll to start of descriptor FIFO */
+			offset = 0;
+
+		*update_offset = offset;
+#ifdef SPS_BAM_STATISTICS
+		pipe->sys.desc_rd_count++;
+#endif /* SPS_BAM_STATISTICS */
+
+		/* Did client request notification for this descriptor? */
+		flags = cache->flags & enabled;
+		if (*user != NULL || flags) {
+			int index;
+
+			if ((flags & SPS_IOVEC_FLAG_EOT))
+				event_id = SPS_EVENT_EOT;
+			else
+				event_id = SPS_EVENT_DESC_DONE;
+
+			index = SPS_EVENT_INDEX(event_id);
+			event_reg = &pipe->sys.event_regs[index];
+			event = alloc_event(pipe, event_reg);
+			if (event != NULL) {
+				/*
+				 * Store the descriptor and user pointer
+				 * in the notification
+				 */
+				event->notify.data.transfer.iovec = *cache;
+				event->notify.data.transfer.user = *user;
+
+				event->notify.event_id = event_id;
+				event->notify.user = event_reg->user;
+				trigger_event(dev, pipe, event_reg, event);
+			} else {
+				SPS_ERR(dev,
+					"sps: %s: pipe %d: event is NULL.\n",
+					__func__, pipe->pipe_index);
+			}
+#ifdef SPS_BAM_STATISTICS
+			if (*user != NULL)
+				pipe->sys.user_found++;
+#endif /* SPS_BAM_STATISTICS */
+		}
+
+		/* Increment to next descriptor */
+		if (offset == end_offset)
+			break;	/* No more descriptors */
+
+		if (offset) {
+			cache++;
+			user++;
+		} else {
+			cache = (void *)pipe->sys.desc_cache;
+			user = pipe->sys.user_ptrs;
+		}
+	}
+
+	pipe->sys.handler_eot = false;
+}
+
+/**
+ * Handle a BAM pipe's interrupt sources
+ *
+ * This function handles a BAM pipe's interrupt sources.
+ *    The caller of this function must lock the BAM device's mutex.
+ *
+ * @dev - pointer to BAM device descriptor
+ *
+ * @pipe - pointer to pipe state
+ *
+ * @return void
+ *
+ */
+static void pipe_handler(struct sps_bam *dev, struct sps_pipe *pipe)
+{
+	u32 pipe_index;
+	u32 status;
+	enum sps_event event_id;
+
+	/* Get interrupt sources and ack all */
+	pipe_index = pipe->pipe_index;
+	status = bam_pipe_get_and_clear_irq_status(&dev->base, pipe_index);
+
+	SPS_DBG(dev, "sps:pipe_handler.bam %pa.pipe %d.status=0x%x.\n",
+			BAM_ID(dev), pipe_index, status);
+
+	/* Check for enabled interrupt sources */
+	status &= pipe->irq_mask;
+	if (status == 0)
+		/* No enabled interrupt sources are active */
+		return;
+
+	/*
+	 * Process the interrupt sources in order of frequency of occurrence.
+	 * Check for early exit opportunities.
+	 */
+
+	if ((status & (SPS_O_EOT | SPS_O_DESC_DONE)) &&
+	    (pipe->state & BAM_STATE_BAM2BAM) == 0) {
+		pipe_handler_eot(dev, pipe);
+		if (pipe->sys.no_queue) {
+			/*
+			 * EOT handler will not generate any event if there
+			 * is no queue,
+			 * so generate "empty" (no descriptor) event
+			 */
+			if ((status & SPS_O_EOT))
+				event_id = SPS_EVENT_EOT;
+			else
+				event_id = SPS_EVENT_DESC_DONE;
+
+			pipe_handler_generic(dev, pipe, event_id);
+		}
+		status &= ~(SPS_O_EOT | SPS_O_DESC_DONE);
+		if (status == 0)
+			return;
+	}
+
+	if ((status & SPS_O_WAKEUP)) {
+		pipe_handler_wakeup(dev, pipe);
+		status &= ~SPS_O_WAKEUP;
+		if (status == 0)
+			return;
+	}
+
+	if ((status & SPS_O_INACTIVE)) {
+		pipe_handler_generic(dev, pipe, SPS_EVENT_INACTIVE);
+		status &= ~SPS_O_INACTIVE;
+		if (status == 0)
+			return;
+	}
+
+	if ((status & SPS_O_OUT_OF_DESC)) {
+		pipe_handler_generic(dev, pipe,
+					     SPS_EVENT_OUT_OF_DESC);
+		status &= ~SPS_O_OUT_OF_DESC;
+		if (status == 0)
+			return;
+	}
+
+	if ((status & SPS_O_RST_ERROR) && enhd_pipe) {
+		SPS_ERR(dev, "sps:bam %pa ;pipe 0x%x irq status=0x%x.\n"
+				"sps: BAM_PIPE_IRQ_RST_ERROR\n",
+				BAM_ID(dev), pipe_index, status);
+		bam_output_register_content(&dev->base, dev->props.ee);
+		pipe_handler_generic(dev, pipe,
+					     SPS_EVENT_RST_ERROR);
+		status &= ~SPS_O_RST_ERROR;
+		if (status == 0)
+			return;
+	}
+
+	if ((status & SPS_O_HRESP_ERROR) && enhd_pipe) {
+		SPS_ERR(dev, "sps:bam %pa ;pipe 0x%x irq status=0x%x.\n"
+				"sps: BAM_PIPE_IRQ_HRESP_ERROR\n",
+				BAM_ID(dev), pipe_index, status);
+		bam_output_register_content(&dev->base, dev->props.ee);
+		pipe_handler_generic(dev, pipe,
+					     SPS_EVENT_HRESP_ERROR);
+		status &= ~SPS_O_HRESP_ERROR;
+		if (status == 0)
+			return;
+	}
+
+	if ((status & SPS_EVENT_ERROR))
+		pipe_handler_generic(dev, pipe, SPS_EVENT_ERROR);
+}
+
+/**
+ * Get a BAM pipe event
+ *
+ */
+int sps_bam_pipe_get_event(struct sps_bam *dev,
+			   u32 pipe_index, struct sps_event_notify *notify)
+{
+	struct sps_pipe *pipe = dev->pipes[pipe_index];
+	struct sps_q_event *event_queue;
+
+	if (pipe->sys.no_queue) {
+		SPS_ERR(dev,
+			"sps:Invalid connection for event: BAM %pa pipe %d context 0x%p\n",
+			BAM_ID(dev), pipe_index, pipe);
+		notify->event_id = SPS_EVENT_INVALID;
+		return SPS_ERROR;
+	}
+
+	/* If pipe is polled, perform polling operation */
+	if (pipe->polled && (pipe->state & BAM_STATE_BAM2BAM) == 0)
+		pipe_handler_eot(dev, pipe);
+
+	/* Pull an event off the synchronous event queue */
+	if (list_empty(&pipe->sys.events_q)) {
+		event_queue = NULL;
+		SPS_DBG(dev, "sps:events_q of bam %pa is empty.\n",
+							BAM_ID(dev));
+	} else {
+		SPS_DBG(dev, "sps:events_q of bam %pa is not empty.\n",
+			BAM_ID(dev));
+		event_queue =
+		list_first_entry(&pipe->sys.events_q, struct sps_q_event,
+				 list);
+		list_del(&event_queue->list);
+	}
+
+	/* Update client's event buffer */
+	if (event_queue == NULL) {
+		/* No event queued, so set client's event to "invalid" */
+		notify->event_id = SPS_EVENT_INVALID;
+	} else {
+		/*
+		 * Copy event into client's buffer and return the event
+		 * to the pool
+		 */
+		*notify = event_queue->notify;
+		kfree(event_queue);
+#ifdef SPS_BAM_STATISTICS
+		pipe->sys.get_events++;
+#endif /* SPS_BAM_STATISTICS */
+	}
+
+	return 0;
+}
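+
+/*
+ * Illustrative sketch (not part of the original driver): a client-side
+ * loop that drains the synchronous event queue via
+ * sps_bam_pipe_get_event().  The helper name is hypothetical; the
+ * termination condition follows the contract above, which sets
+ * event_id to SPS_EVENT_INVALID once the queue is empty.
+ */
+static void example_drain_events(struct sps_bam *dev, u32 pipe_index)
+{
+	struct sps_event_notify notify;
+
+	for (;;) {
+		if (sps_bam_pipe_get_event(dev, pipe_index, &notify))
+			break;	/* driver reported an error */
+		if (notify.event_id == SPS_EVENT_INVALID)
+			break;	/* no more queued events */
+		/* dispatch on notify.event_id here */
+	}
+}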
+
+/**
+ * Get processed I/O vector
+ */
+int sps_bam_pipe_get_iovec(struct sps_bam *dev, u32 pipe_index,
+			   struct sps_iovec *iovec)
+{
+	struct sps_pipe *pipe = dev->pipes[pipe_index];
+	struct sps_iovec *desc;
+	u32 read_offset;
+
+	/* Is this a valid pipe configured for get_iovec use? */
+	if (!pipe->sys.ack_xfers ||
+	    (pipe->state & BAM_STATE_BAM2BAM) != 0 ||
+	    (pipe->state & BAM_STATE_REMOTE)) {
+		return SPS_ERROR;
+	}
+
+	/* If pipe is polled and queue is enabled, perform polling operation */
+	if ((pipe->polled || pipe->hybrid) && !pipe->sys.no_queue) {
+		SPS_DBG(dev,
+			"sps:%s; BAM: %pa; pipe index:%d; polled is %d; hybrid is %d.\n",
+			__func__, BAM_ID(dev), pipe_index,
+			pipe->polled, pipe->hybrid);
+		pipe_handler_eot(dev, pipe);
+	}
+
+	/* Is there a completed descriptor? */
+	if (pipe->sys.no_queue)
+		read_offset =
+		bam_pipe_get_desc_read_offset(&dev->base, pipe_index);
+	else
+		read_offset = pipe->sys.cache_offset;
+
+	if (read_offset == pipe->sys.acked_offset) {
+		/* No, so clear the iovec to indicate FIFO is empty */
+		memset(iovec, 0, sizeof(*iovec));
+		SPS_DBG(dev,
+			"sps:%s; BAM: %pa; pipe index:%d; no iovec to process.\n",
+			__func__, BAM_ID(dev), pipe_index);
+		return 0;
+	}
+
+	/* Fetch next descriptor */
+	desc = (struct sps_iovec *) (pipe->sys.desc_buf +
+				     pipe->sys.acked_offset);
+	*iovec = *desc;
+#ifdef SPS_BAM_STATISTICS
+	pipe->sys.get_iovecs++;
+#endif /* SPS_BAM_STATISTICS */
+
+	/* Update read/ACK offset */
+	pipe->sys.acked_offset += sizeof(struct sps_iovec);
+	if (pipe->sys.acked_offset >= pipe->desc_size)
+		pipe->sys.acked_offset = 0;
+
+	SPS_DBG(dev,
+		"sps:%s; pipe index:%d; iovec addr:0x%x; size:0x%x; flags:0x%x; acked_offset:0x%x.\n",
+		__func__, pipe->pipe_index, desc->addr,
+		desc->size, desc->flags, pipe->sys.acked_offset);
+
+	return 0;
+}
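+
+/*
+ * Illustrative sketch (hypothetical helper, not in the driver): retiring
+ * completed descriptors with sps_bam_pipe_get_iovec() on an ack_xfers
+ * pipe.  Per the contract above, an all-zero iovec means the descriptor
+ * FIFO has been fully drained.
+ */
+static void example_retire_iovecs(struct sps_bam *dev, u32 pipe_index)
+{
+	struct sps_iovec iovec;
+
+	while (sps_bam_pipe_get_iovec(dev, pipe_index, &iovec) == 0) {
+		if (iovec.addr == 0 && iovec.size == 0 && iovec.flags == 0)
+			break;	/* descriptor FIFO is empty */
+		/* process the completed buffer at iovec.addr here */
+	}
+}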
+
+/**
+ * Determine whether a BAM pipe descriptor FIFO is empty
+ *
+ */
+int sps_bam_pipe_is_empty(struct sps_bam *dev, u32 pipe_index,
+				u32 *empty)
+{
+	struct sps_pipe *pipe = dev->pipes[pipe_index];
+	u32 end_offset;
+	u32 acked_offset;
+
+	/* Is this a satellite connection? */
+	if ((pipe->state & BAM_STATE_REMOTE)) {
+		SPS_ERR(dev, "sps:Is empty on remote: BAM %pa pipe %d\n",
+			BAM_ID(dev), pipe_index);
+		return SPS_ERROR;
+	}
+
+	/* Get offset of last descriptor completed by the pipe */
+	end_offset = bam_pipe_get_desc_read_offset(&dev->base, pipe_index);
+
+	if ((pipe->state & BAM_STATE_BAM2BAM) == 0)
+		/* System mode */
+		acked_offset = pipe->sys.acked_offset;
+	else
+		/* BAM-to-BAM */
+		acked_offset = bam_pipe_get_desc_write_offset(&dev->base,
+							  pipe_index);
+
+	/* Determine descriptor FIFO state */
+	if (end_offset == acked_offset) {
+		*empty = true;
+	} else {
+		if ((pipe->state & BAM_STATE_BAM2BAM) == 0) {
+			*empty = false;
+			SPS_DBG1(dev,
+				"sps:%s; pipe index:%d; this sys2bam pipe is NOT empty.\n",
+				__func__, pipe->pipe_index);
+			return 0;
+		}
+		if (bam_pipe_check_zlt(&dev->base, pipe_index)) {
+			bool p_idc;
+			u32 next_write;
+
+			p_idc = bam_pipe_check_pipe_empty(&dev->base,
+								pipe_index);
+
+			next_write = acked_offset + sizeof(struct sps_iovec);
+			if (next_write >= pipe->desc_size)
+				next_write = 0;
+
+			if (next_write == end_offset) {
+				*empty = true;
+				if (!p_idc)
+					SPS_DBG3(dev,
+						"sps:BAM %pa pipe %d pipe empty checking for ZLT.\n",
+						BAM_ID(dev), pipe_index);
+			} else {
+				*empty = false;
+			}
+		} else {
+			*empty = false;
+		}
+	}
+
+	SPS_DBG1(dev,
+		"sps:%s; pipe index:%d; this pipe is %s empty.\n",
+		__func__, pipe->pipe_index, *empty ? "" : "NOT");
+
+	return 0;
+}
+
+/**
+ * Get number of free slots in a BAM pipe descriptor FIFO
+ *
+ */
+int sps_bam_get_free_count(struct sps_bam *dev, u32 pipe_index,
+				 u32 *count)
+{
+	struct sps_pipe *pipe = dev->pipes[pipe_index];
+	u32 next_write;
+	u32 free;
+
+	/* Is this a BAM-to-BAM or satellite connection? */
+	if ((pipe->state & (BAM_STATE_BAM2BAM | BAM_STATE_REMOTE))) {
+		SPS_ERR(dev,
+			"sps:Free count on BAM-to-BAM or remote: BAM %pa pipe %d\n",
+			BAM_ID(dev), pipe_index);
+		*count = 0;
+		return SPS_ERROR;
+	}
+
+	/* Determine descriptor FIFO state */
+	next_write = pipe->sys.desc_offset + sizeof(struct sps_iovec);
+	if (next_write >= pipe->desc_size)
+		next_write = 0;
+
+	if (pipe->sys.acked_offset >= next_write)
+		free = pipe->sys.acked_offset - next_write;
+	else
+		free = pipe->desc_size - next_write + pipe->sys.acked_offset;
+
+	free /= sizeof(struct sps_iovec);
+	*count = free;
+
+	return 0;
+}
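+
+/*
+ * Illustrative sketch (hypothetical helper): consulting the free count
+ * before queuing a buffer.  sps_bam_pipe_transfer_one() is declared in
+ * sps_bam.h; addr and size are caller-supplied placeholders here.
+ */
+static int example_submit_if_room(struct sps_bam *dev, u32 pipe_index,
+				  u32 addr, u32 size)
+{
+	u32 free_slots;
+
+	if (sps_bam_get_free_count(dev, pipe_index, &free_slots))
+		return SPS_ERROR;
+
+	if (free_slots == 0)
+		return SPS_ERROR;	/* descriptor FIFO is full */
+
+	return sps_bam_pipe_transfer_one(dev, pipe_index, addr, size,
+					 NULL, SPS_IOVEC_FLAG_EOT);
+}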
+
+/**
+ * Set BAM pipe to satellite ownership
+ *
+ */
+int sps_bam_set_satellite(struct sps_bam *dev, u32 pipe_index)
+{
+	struct sps_pipe *pipe = dev->pipes[pipe_index];
+
+	/*
+	 * Switch to satellite control is only supported on processor
+	 * that controls the BAM global config on multi-EE BAMs
+	 */
+	if ((dev->props.manage & SPS_BAM_MGR_MULTI_EE) == 0 ||
+	    (dev->props.manage & SPS_BAM_MGR_DEVICE_REMOTE)) {
+		SPS_ERR(dev,
+			"sps:Cannot grant satellite control to BAM %pa pipe %d\n",
+			BAM_ID(dev), pipe_index);
+		return SPS_ERROR;
+	}
+
+	/* Is this pipe locally controlled? */
+	if ((dev->pipe_active_mask & (1UL << pipe_index)) == 0) {
+		SPS_ERR(dev, "sps:BAM %pa pipe %d not local and active\n",
+			BAM_ID(dev), pipe_index);
+		return SPS_ERROR;
+	}
+
+	/* Disable local interrupts for this pipe */
+	if (!pipe->polled)
+		bam_pipe_set_irq(&dev->base, pipe_index, BAM_DISABLE,
+					 pipe->irq_mask, dev->props.ee);
+
+	if (BAM_VERSION_MTI_SUPPORT(dev->version)) {
+		/*
+		 * Set pipe to MTI interrupt mode.
+		 * Must be performed after IRQ disable,
+		 * because it is necessary to re-enable the IRQ to enable
+		 * MTI generation.
+		 * Set both pipe IRQ mask and MTI dest address to zero.
+		 */
+		if ((pipe->state & BAM_STATE_MTI) == 0 || pipe->polled) {
+			bam_pipe_satellite_mti(&dev->base, pipe_index, 0,
+						       dev->props.ee);
+			pipe->state |= BAM_STATE_MTI;
+		}
+	}
+
+	/* Indicate satellite control */
+	list_del(&pipe->list);
+	dev->pipe_active_mask &= ~(1UL << pipe_index);
+	dev->pipe_remote_mask |= pipe->pipe_index_mask;
+	pipe->state |= BAM_STATE_REMOTE;
+
+	return 0;
+}
+
+/**
+ * Perform BAM pipe timer control
+ *
+ */
+int sps_bam_pipe_timer_ctrl(struct sps_bam *dev,
+			    u32 pipe_index,
+			    struct sps_timer_ctrl *timer_ctrl,
+			    struct sps_timer_result *timer_result)
+{
+	enum bam_pipe_timer_mode mode;
+	int result = 0;
+
+	/* Is this pipe locally controlled? */
+	if ((dev->pipe_active_mask & (1UL << pipe_index)) == 0) {
+		SPS_ERR(dev, "sps:BAM %pa pipe %d not local and active\n",
+			BAM_ID(dev), pipe_index);
+		return SPS_ERROR;
+	}
+
+	/* Perform the timer operation */
+	switch (timer_ctrl->op) {
+	case SPS_TIMER_OP_CONFIG:
+		mode = (timer_ctrl->mode == SPS_TIMER_MODE_ONESHOT) ?
+			BAM_PIPE_TIMER_ONESHOT :
+			BAM_PIPE_TIMER_PERIODIC;
+		bam_pipe_timer_config(&dev->base, pipe_index, mode,
+				    timer_ctrl->timeout_msec * 8);
+		break;
+	case SPS_TIMER_OP_RESET:
+		bam_pipe_timer_reset(&dev->base, pipe_index);
+		break;
+	case SPS_TIMER_OP_READ:
+		break;
+	default:
+		result = SPS_ERROR;
+		break;
+	}
+
+	/* Provide the current timer value */
+	if (timer_result != NULL)
+		timer_result->current_timer =
+			bam_pipe_timer_get_count(&dev->base, pipe_index);
+
+	return result;
+}
+
+/**
+ * Get the number of unused descriptors in the descriptor FIFO
+ * of a pipe
+ */
+int sps_bam_pipe_get_unused_desc_num(struct sps_bam *dev, u32 pipe_index,
+					u32 *desc_num)
+{
+	u32 sw_offset, peer_offset, fifo_size;
+	u32 desc_size = sizeof(struct sps_iovec);
+	struct sps_pipe *pipe = dev->pipes[pipe_index];
+
+	if (pipe == NULL)
+		return SPS_ERROR;
+
+	fifo_size = pipe->desc_size;
+
+	sw_offset = bam_pipe_get_desc_read_offset(&dev->base, pipe_index);
+	if ((dev->props.options & SPS_BAM_CACHED_WP) &&
+		!(pipe->state & BAM_STATE_BAM2BAM)) {
+		peer_offset = pipe->sys.desc_offset;
+		SPS_DBG(dev,
+			"sps:BAM %pa pipe %d: peer offset in cache:0x%x\n",
+			BAM_ID(dev), pipe_index, peer_offset);
+	} else {
+		peer_offset = bam_pipe_get_desc_write_offset(&dev->base,
+				pipe_index);
+	}
+
+	if (sw_offset <= peer_offset)
+		*desc_num = (peer_offset - sw_offset) / desc_size;
+	else
+		*desc_num = (peer_offset + fifo_size - sw_offset) / desc_size;
+
+	return 0;
+}
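+
+/*
+ * Worked example of the wrap-around arithmetic above, assuming an
+ * 8-byte struct sps_iovec and a 128-byte (16-slot) FIFO: with
+ * sw_offset = 0x40 and peer_offset = 0x10 the peer has wrapped, so
+ * *desc_num = (0x10 + 0x80 - 0x40) / 8 = 10 pending descriptors.
+ */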
+
+/*
+ * Check if a pipe of a BAM has any pending descriptor
+ */
+bool sps_bam_pipe_pending_desc(struct sps_bam *dev, u32 pipe_index)
+{
+	u32 sw_offset, peer_offset;
+
+	sw_offset = bam_pipe_get_desc_read_offset(&dev->base, pipe_index);
+	peer_offset = bam_pipe_get_desc_write_offset(&dev->base, pipe_index);
+
+	if (sw_offset == peer_offset)
+		return false;
+	else
+		return true;
+}
diff --git a/drivers/platform/msm/sps/sps_bam.h b/drivers/platform/msm/sps/sps_bam.h
new file mode 100644
index 0000000..468c492
--- /dev/null
+++ b/drivers/platform/msm/sps/sps_bam.h
@@ -0,0 +1,616 @@
+/* Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * Function and data structure declarations for SPS BAM handling.
+ */
+
+
+#ifndef _SPSBAM_H_
+#define _SPSBAM_H_
+
+#include <linux/types.h>
+#include <linux/completion.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+
+#include "spsi.h"
+
+#define BAM_HANDLE_INVALID         0
+
+#define to_sps_bam_dev(x) \
+	container_of((x), struct sps_bam, base)
+
+enum bam_irq {
+	BAM_DEV_IRQ_RDY_TO_SLEEP = 0x00000001,
+	BAM_DEV_IRQ_HRESP_ERROR = 0x00000002,
+	BAM_DEV_IRQ_ERROR = 0x00000004,
+	BAM_DEV_IRQ_TIMER = 0x00000010,
+};
+
+/* Pipe interrupt mask */
+enum bam_pipe_irq {
+	/* BAM finishes descriptor which has INT bit selected */
+	BAM_PIPE_IRQ_DESC_INT = 0x00000001,
+	/* Inactivity timer Expires */
+	BAM_PIPE_IRQ_TIMER = 0x00000002,
+	/* Wakeup peripheral (i.e. USB) */
+	BAM_PIPE_IRQ_WAKE = 0x00000004,
+	/* Producer - no free space for adding a descriptor */
+	/* Consumer - no descriptors for processing */
+	BAM_PIPE_IRQ_OUT_OF_DESC = 0x00000008,
+	/* Pipe Error interrupt */
+	BAM_PIPE_IRQ_ERROR = 0x00000010,
+	/* End-Of-Transfer */
+	BAM_PIPE_IRQ_EOT = 0x00000020,
+	/* Pipe RESET unsuccessful */
+	BAM_PIPE_IRQ_RST_ERROR = 0x00000040,
+	/* Erroneous Hresponse by AHB master */
+	BAM_PIPE_IRQ_HRESP_ERROR = 0x00000080,
+};
+
+/* Halt Type */
+enum bam_halt {
+	BAM_HALT_OFF = 0,
+	BAM_HALT_ON = 1,
+};
+
+/* Threshold values of the DMA channels */
+enum bam_dma_thresh_dma {
+	BAM_DMA_THRESH_512 = 0x3,
+	BAM_DMA_THRESH_256 = 0x2,
+	BAM_DMA_THRESH_128 = 0x1,
+	BAM_DMA_THRESH_64 = 0x0,
+};
+
+/* Weight values of the DMA channels */
+enum bam_dma_weight_dma {
+	BAM_DMA_WEIGHT_HIGH = 7,
+	BAM_DMA_WEIGHT_MED = 3,
+	BAM_DMA_WEIGHT_LOW = 1,
+	BAM_DMA_WEIGHT_DEFAULT = BAM_DMA_WEIGHT_LOW,
+	BAM_DMA_WEIGHT_DISABLE = 0,
+};
+
+
+/* Invalid pipe index value */
+#define SPS_BAM_PIPE_INVALID  ((u32)(-1))
+
+/* Parameters for sps_bam_pipe_connect() */
+struct sps_bam_connect_param {
+	/* Specifies which end point (source or destination) to initialize */
+	enum sps_mode mode;
+
+	/* OR'd connection end point options (see SPS_O defines) */
+	u32 options;
+
+	/* SETPEND/MTI interrupt generation parameters */
+	u32 irq_gen_addr;
+	u32 irq_gen_data;
+
+};
+
+/* Event registration struct */
+struct sps_bam_event_reg {
+	/* Client's event object handle */
+	struct completion *xfer_done;
+	void (*callback)(struct sps_event_notify *notify);
+
+	/* Event trigger mode */
+	enum sps_trigger mode;
+
+	/* User pointer that will be provided in event payload data */
+	void *user;
+
+};
+
+/* Descriptor FIFO cache entry */
+struct sps_bam_desc_cache {
+	struct sps_iovec iovec;
+	void *user; /* User pointer registered with this transfer */
+};
+
+/* Forward declaration */
+struct sps_bam;
+
+/* System mode control */
+struct sps_bam_sys_mode {
+	/* Descriptor FIFO control */
+	u8 *desc_buf; /* Descriptor FIFO for BAM pipe */
+	u32 desc_offset; /* Next new descriptor to be written to hardware */
+	u32 acked_offset; /* Next descriptor to be retired by software */
+
+	/* Descriptor cache control (!no_queue only) */
+	u8 *desc_cache; /* Software cache of descriptor FIFO contents */
+	u32 cache_offset; /* Next descriptor to be cached (ack_xfers only) */
+
+	/* User pointers associated with cached descriptors */
+	void **user_ptrs;
+
+	/* Event handling */
+	struct sps_bam_event_reg event_regs[SPS_EVENT_INDEX(SPS_EVENT_MAX)];
+	struct list_head events_q;
+
+	struct sps_q_event event;	/* Temp storage for event creation */
+	int no_queue;	/* Whether event queuing is disabled */
+	int ack_xfers;	/* Whether client must ACK all descriptors */
+	int handler_eot; /* Whether EOT handling is in progress (debug) */
+
+	/* Statistics */
+#ifdef SPS_BAM_STATISTICS
+	u32 desc_wr_count;
+	u32 desc_rd_count;
+	u32 user_ptrs_count;
+	u32 user_found;
+	u32 int_flags;
+	u32 eot_flags;
+	u32 callback_events;
+	u32 wait_events;
+	u32 queued_events;
+	u32 get_events;
+	u32 get_iovecs;
+#endif /* SPS_BAM_STATISTICS */
+};
+
+/* BAM pipe descriptor */
+struct sps_pipe {
+	struct list_head list;
+
+	/* Client state */
+	u32 client_state;
+	struct sps_bam *bam;
+	struct sps_connect connect;
+	const struct sps_connection *map;
+
+	/* Pipe parameters */
+	u32 state;
+	u32 pipe_index;
+	u32 pipe_index_mask;
+	u32 irq_mask;
+	int polled;
+	int hybrid;
+	bool late_eot;
+	u32 irq_gen_addr;
+	enum sps_mode mode;
+	u32 num_descs; /* Size (number of elements) of descriptor FIFO */
+	u32 desc_size; /* Size (bytes) of descriptor FIFO */
+	int wake_up_is_one_shot; /* Whether WAKEUP event is a one-shot or not */
+
+	/* System mode control */
+	struct sps_bam_sys_mode sys;
+
+	bool disconnecting;
+};
+
+/* BAM device descriptor */
+struct sps_bam {
+	struct list_head list;
+
+	/* BAM device properties, including connection defaults */
+	struct sps_bam_props props;
+
+	/* BAM device state */
+	u32 state;
+	struct mutex lock;
+	void *base; /* BAM virtual base address */
+	u32 version;
+	spinlock_t isr_lock;
+	spinlock_t connection_lock;
+	unsigned long irqsave_flags;
+
+	/* Pipe state */
+	u32 pipe_active_mask;
+	u32 pipe_remote_mask;
+	struct sps_pipe *pipes[BAM_MAX_PIPES];
+	struct list_head pipes_q;
+
+	/* Statistics */
+	u32 irq_from_disabled_pipe;
+	u32 event_trigger_failures;
+
+	void *ipc_log0;
+	void *ipc_log1;
+	void *ipc_log2;
+	void *ipc_log3;
+	void *ipc_log4;
+
+	u32 ipc_loglevel;
+
+	/* Desc cache pointers */
+	u8 *desc_cache_pointers[BAM_MAX_PIPES];
+};
+
+/**
+ * BAM driver initialization
+ *
+ * This function initializes the BAM driver.
+ *
+ * @options - driver options bitflags (see SPS_OPT_*)
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_bam_driver_init(u32 options);
+
+/**
+ * BAM device initialization
+ *
+ * This function initializes a BAM device.
+ *
+ * @dev - pointer to BAM device descriptor
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_bam_device_init(struct sps_bam *dev);
+
+/**
+ * BAM device de-initialization
+ *
+ * This function de-initializes a BAM device.
+ *
+ * @dev - pointer to BAM device descriptor
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_bam_device_de_init(struct sps_bam *dev);
+
+/**
+ * BAM device reset
+ *
+ * This function resets a BAM device.
+ *
+ * @dev - pointer to BAM device descriptor
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_bam_reset(struct sps_bam *dev);
+
+/**
+ * BAM device enable
+ *
+ * This function enables a BAM device.
+ *
+ * @dev - pointer to BAM device descriptor
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_bam_enable(struct sps_bam *dev);
+
+/**
+ * BAM device disable
+ *
+ * This function disables a BAM device.
+ *
+ * @dev - pointer to BAM device descriptor
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_bam_disable(struct sps_bam *dev);
+
+/**
+ * Allocate a BAM pipe
+ *
+ * This function allocates a BAM pipe.
+ *
+ * @dev - pointer to BAM device descriptor
+ *
+ * @pipe_index - client-specified pipe index, or SPS_BAM_PIPE_INVALID if
+ *    any available pipe is acceptable
+ *
+ * @return - allocated pipe index, or SPS_BAM_PIPE_INVALID on error
+ *
+ */
+u32 sps_bam_pipe_alloc(struct sps_bam *dev, u32 pipe_index);
+
+/**
+ * Free a BAM pipe
+ *
+ * This function frees a BAM pipe.
+ *
+ * @dev - pointer to BAM device descriptor
+ *
+ * @pipe_index - pipe index
+ *
+ */
+void sps_bam_pipe_free(struct sps_bam *dev, u32 pipe_index);
+
+/**
+ * Establish BAM pipe connection
+ *
+ * This function establishes a connection for a BAM pipe (end point).
+ *
+ * @client - pointer to client pipe state struct
+ *
+ * @params - connection parameters
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_bam_pipe_connect(struct sps_pipe *client,
+			const struct sps_bam_connect_param *params);
+
+/**
+ * Disconnect a BAM pipe connection
+ *
+ * This function disconnects a connection for a BAM pipe (end point).
+ *
+ * @dev - pointer to BAM device descriptor
+ *
+ * @pipe_index - pipe index
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_bam_pipe_disconnect(struct sps_bam *dev, u32 pipe_index);
+
+/**
+ * Set BAM pipe parameters
+ *
+ * This function sets parameters for a BAM pipe.
+ *
+ * @dev - pointer to BAM device descriptor
+ *
+ * @pipe_index - pipe index
+ *
+ * @options - bitflag options (see SPS_O_*)
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_bam_pipe_set_params(struct sps_bam *dev, u32 pipe_index, u32 options);
+
+/**
+ * Enable a BAM pipe
+ *
+ * This function enables a BAM pipe.  Note that this function
+ *    is separate from the pipe connect function to allow proper
+ *    sequencing of consumer enable followed by producer enable.
+ *
+ * @dev - pointer to BAM device descriptor
+ *
+ * @pipe_index - pipe index
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_bam_pipe_enable(struct sps_bam *dev, u32 pipe_index);
+
+/**
+ * Disable a BAM pipe
+ *
+ * This function disables a BAM pipe.
+ *
+ * @dev - pointer to BAM device descriptor
+ *
+ * @pipe_index - pipe index
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_bam_pipe_disable(struct sps_bam *dev, u32 pipe_index);
+
+/**
+ * Register an event for a BAM pipe
+ *
+ * This function registers an event for a BAM pipe.
+ *
+ * @dev - pointer to BAM device descriptor
+ *
+ * @pipe_index - pipe index
+ *
+ * @reg - pointer to event registration struct
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_bam_pipe_reg_event(struct sps_bam *dev, u32 pipe_index,
+			   struct sps_register_event *reg);
+
+/**
+ * Submit a transfer of a single buffer to a BAM pipe
+ *
+ * This function submits a transfer of a single buffer to a BAM pipe.
+ *
+ * @dev - pointer to BAM device descriptor
+ *
+ * @pipe_index - pipe index
+ *
+ * @addr - physical address of buffer to transfer
+ *
+ * @size - number of bytes to transfer
+ *
+ * @user - user pointer to register for event
+ *
+ * @flags - descriptor flags (see SPS_IOVEC_FLAG defines)
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_bam_pipe_transfer_one(struct sps_bam *dev, u32 pipe_index, u32 addr,
+			      u32 size, void *user, u32 flags);
+
+/**
+ * Submit a transfer to a BAM pipe
+ *
+ * This function submits a transfer to a BAM pipe.
+ *
+ * @dev - pointer to BAM device descriptor
+ *
+ * @pipe_index - pipe index
+ *
+ * @transfer - pointer to transfer struct
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_bam_pipe_transfer(struct sps_bam *dev, u32 pipe_index,
+			 struct sps_transfer *transfer);
+
+/**
+ * Get a BAM pipe event
+ *
+ * This function polls for a BAM pipe event.
+ *
+ * @dev - pointer to BAM device descriptor
+ *
+ * @pipe_index - pipe index
+ *
+ * @notify - pointer to event notification struct
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_bam_pipe_get_event(struct sps_bam *dev, u32 pipe_index,
+			   struct sps_event_notify *notify);
+
+/**
+ * Get processed I/O vector
+ *
+ * This function fetches the next processed I/O vector.
+ *
+ * @dev - pointer to BAM device descriptor
+ *
+ * @pipe_index - pipe index
+ *
+ * @iovec - Pointer to I/O vector struct (output).
+ *   This struct will be zeroed if there are no more processed I/O vectors.
+ *
+ * @return 0 on success, negative value on error
+ */
+int sps_bam_pipe_get_iovec(struct sps_bam *dev, u32 pipe_index,
+			   struct sps_iovec *iovec);
+
+/**
+ * Determine whether a BAM pipe descriptor FIFO is empty
+ *
+ * This function returns the empty state of a BAM pipe descriptor FIFO.
+ *
+ * The pipe mutex must be locked before calling this function.
+ *
+ * @dev - pointer to BAM device descriptor
+ *
+ * @pipe_index - pipe index
+ *
+ * @empty - pointer to client's empty status word (boolean)
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_bam_pipe_is_empty(struct sps_bam *dev, u32 pipe_index, u32 *empty);
+
+/**
+ * Get number of free slots in a BAM pipe descriptor FIFO
+ *
+ * This function returns the number of free slots in a BAM pipe descriptor FIFO.
+ *
+ * The pipe mutex must be locked before calling this function.
+ *
+ * @dev - pointer to BAM device descriptor
+ *
+ * @pipe_index - pipe index
+ *
+ * @count - pointer to count status
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_bam_get_free_count(struct sps_bam *dev, u32 pipe_index, u32 *count);
+
+/**
+ * Set BAM pipe to satellite ownership
+ *
+ * This function sets the BAM pipe to satellite ownership.
+ *
+ * @dev - pointer to BAM device descriptor
+ *
+ * @pipe_index - pipe index
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_bam_set_satellite(struct sps_bam *dev, u32 pipe_index);
+
+/**
+ * Perform BAM pipe timer control
+ *
+ * This function performs BAM pipe timer control operations.
+ *
+ * @dev - pointer to BAM device descriptor
+ *
+ * @pipe_index - pipe index
+ *
+ * @timer_ctrl - Pointer to timer control specification
+ *
+ * @timer_result - Pointer to buffer for timer operation result.
+ *    This argument can be NULL if no result is expected for the operation.
+ *    If non-NULL, the current timer value will always be provided.
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_bam_pipe_timer_ctrl(struct sps_bam *dev, u32 pipe_index,
+			    struct sps_timer_ctrl *timer_ctrl,
+			    struct sps_timer_result *timer_result);
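+
+/*
+ * Illustrative sketch (hypothetical helper): arming a one-shot pipe
+ * inactivity timer.  The 100 ms timeout is a placeholder, and the
+ * timeout_msec * 8 scaling in the implementation suggests roughly
+ * 8 hardware ticks per millisecond, though that rate is an assumption.
+ */
+static inline int example_arm_oneshot_timer(struct sps_bam *dev,
+					    u32 pipe_index)
+{
+	struct sps_timer_ctrl ctrl = {
+		.op = SPS_TIMER_OP_CONFIG,
+		.mode = SPS_TIMER_MODE_ONESHOT,
+		.timeout_msec = 100,	/* placeholder value */
+	};
+
+	return sps_bam_pipe_timer_ctrl(dev, pipe_index, &ctrl, NULL);
+}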
+
+
+/**
+ * Get the number of unused descriptors in the descriptor FIFO
+ * of a pipe
+ *
+ * @dev - pointer to BAM device descriptor
+ *
+ * @pipe_index - pipe index
+ *
+ * @desc_num - number of unused descriptors
+ *
+ */
+int sps_bam_pipe_get_unused_desc_num(struct sps_bam *dev, u32 pipe_index,
+					u32 *desc_num);
+
+/*
+ * sps_bam_check_irq - check IRQ of a BAM device.
+ * @dev - pointer to BAM device descriptor
+ *
+ * This function checks any pending interrupt of a BAM device.
+ *
+ * Return: 0 on success, negative value on error
+ */
+int sps_bam_check_irq(struct sps_bam *dev);
+
+/*
+ * sps_bam_pipe_pending_desc - checking pending descriptor.
+ * @dev:	BAM device handle
+ * @pipe_index:	pipe index
+ *
+ * This function checks if a pipe of a BAM has any pending descriptor.
+ *
+ * @return true if there is any desc pending
+ */
+bool sps_bam_pipe_pending_desc(struct sps_bam *dev, u32 pipe_index);
+
+/*
+ * sps_bam_pipe_inject_zlt - inject a ZLT with EOT.
+ * @dev:	BAM device handle
+ * @pipe_index:	pipe index
+ *
+ * This function injects a ZLT with EOT for a pipe of a BAM.
+ *
+ * Return: 0 on success, negative value on error
+ */
+int sps_bam_pipe_inject_zlt(struct sps_bam *dev, u32 pipe_index);
+#endif	/* _SPSBAM_H_ */
diff --git a/drivers/platform/msm/sps/sps_core.h b/drivers/platform/msm/sps/sps_core.h
new file mode 100644
index 0000000..592fef8
--- /dev/null
+++ b/drivers/platform/msm/sps/sps_core.h
@@ -0,0 +1,103 @@
+/* Copyright (c) 2011, 2013, 2015-2016, The Linux Foundation. All rights
+ * reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * Function and data structure declarations.
+ */
+
+#ifndef _SPS_CORE_H_
+#define _SPS_CORE_H_
+
+#include <linux/types.h>	/* u32 */
+#include <linux/mutex.h>	/* mutex */
+#include <linux/list.h>		/* list_head */
+
+#include "spsi.h"
+#include "sps_bam.h"
+
+/* Connection state definitions */
+#define SPS_STATE_DEF(x)   ('S' | ('P' << 8) | ('S' << 16) | ((x) << 24))
+#define IS_SPS_STATE_OK(x) \
+	(((x)->client_state & 0x00ffffff) == SPS_STATE_DEF(0))
+
+/* Configuration indicating satellite connection */
+#define SPS_CONFIG_SATELLITE  0x11111111
+
+/* Client connection state */
+#define SPS_STATE_DISCONNECT  0
+#define SPS_STATE_ALLOCATE    SPS_STATE_DEF(1)
+#define SPS_STATE_CONNECT     SPS_STATE_DEF(2)
+#define SPS_STATE_ENABLE      SPS_STATE_DEF(3)
+#define SPS_STATE_DISABLE     SPS_STATE_DEF(4)
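+
+/*
+ * Worked example of the encoding above: SPS_STATE_DEF(x) packs the
+ * ASCII bytes of "SPS" (0x53 0x50 0x53) into the low three bytes and
+ * the state number into the top byte, so SPS_STATE_ALLOCATE is
+ * 0x01535053.  IS_SPS_STATE_OK() just verifies that the low 24 bits
+ * still carry this signature.
+ */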
+
+
+/**
+ * Find the BAM device from the handle
+ *
+ * This function finds a BAM device in the BAM registration list that
+ * matches the specified device handle.
+ *
+ * @h - device handle of the BAM
+ *
+ * @return - pointer to the BAM device struct, or NULL on error
+ *
+ */
+struct sps_bam *sps_h2bam(unsigned long h);
+
+/**
+ * Initialize resource manager module
+ *
+ * This function initializes the resource manager module.
+ *
+ * @rm - pointer to resource manager struct
+ *
+ * @options - driver options bitflags (see SPS_OPT_*)
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_rm_init(struct sps_rm *rm, u32 options);
+
+/**
+ * De-initialize resource manager module
+ *
+ * This function de-initializes the resource manager module.
+ *
+ */
+void sps_rm_de_init(void);
+
+/**
+ * Initialize client state context
+ *
+ * This function initializes a client state context struct.
+ *
+ * @connect - pointer to client connection state struct
+ *
+ */
+void sps_rm_config_init(struct sps_connect *connect);
+
+/**
+ * Process connection state change
+ *
+ * This function processes a connection state change.
+ *
+ * @pipe - pointer to pipe context
+ *
+ * @state - new state for connection
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_rm_state_change(struct sps_pipe *pipe, u32 state);
+
+#endif				/* _SPS_CORE_H_ */
diff --git a/drivers/platform/msm/sps/sps_dma.c b/drivers/platform/msm/sps/sps_dma.c
new file mode 100644
index 0000000..abdcabc
--- /dev/null
+++ b/drivers/platform/msm/sps/sps_dma.c
@@ -0,0 +1,925 @@
+/* Copyright (c) 2011-2013, 2015, 2017, The Linux Foundation. All rights
+ * reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/* BAM-DMA Manager. */
+
+#ifdef CONFIG_SPS_SUPPORT_BAMDMA
+
+#include <linux/export.h>
+#include <linux/memory.h>	/* memset */
+
+#include "spsi.h"
+#include "bam.h"
+#include "sps_bam.h"		/* bam_dma_thresh_dma */
+#include "sps_core.h"		/* sps_h2bam() */
+
+/**
+ * registers
+ */
+
+#define DMA_ENBL			(0x00000000)
+#ifdef CONFIG_SPS_SUPPORT_NDP_BAM
+#define DMA_REVISION			(0x00000004)
+#define DMA_CONFIG			(0x00000008)
+#define DMA_CHNL_CONFIG(n)		(0x00001000 + 4096 * (n))
+#else
+#define DMA_CHNL_CONFIG(n)		(0x00000004 + 4 * (n))
+#define DMA_CONFIG			(0x00000040)
+#endif
+
+/**
+ * masks
+ */
+
+/* DMA_CHNL_confign */
+#ifdef CONFIG_SPS_SUPPORT_NDP_BAM
+#define DMA_CHNL_PRODUCER_PIPE_ENABLED	0x40000
+#define DMA_CHNL_CONSUMER_PIPE_ENABLED	0x20000
+#endif
+#define DMA_CHNL_HALT_DONE		0x10000
+#define DMA_CHNL_HALT			0x1000
+#define DMA_CHNL_ENABLE                 0x100
+#define DMA_CHNL_ACT_THRESH             0x30
+#define DMA_CHNL_WEIGHT                 0x7
+
+/* DMA_CONFIG */
+#define TESTBUS_SELECT                  0x3
+
+/**
+ * Write register with debug info.
+ *
+ * @base - bam base virtual address.
+ * @offset - register offset.
+ * @val - value to write.
+ *
+ */
+static inline void dma_write_reg(void *base, u32 offset, u32 val)
+{
+	iowrite32(val, base + offset);
+	SPS_DBG(sps, "sps:bamdma: write reg 0x%x w_val 0x%x.", offset, val);
+}
+
+/**
+ * Write register masked field with debug info.
+ *
+ * @base - bam base virtual address.
+ * @offset - register offset.
+ * @mask - register bitmask.
+ * @val - value to write.
+ *
+ */
+static inline void dma_write_reg_field(void *base, u32 offset,
+				       const u32 mask, u32 val)
+{
+	u32 shift = find_first_bit((void *)&mask, 32);
+	u32 tmp = ioread32(base + offset);
+
+	tmp &= ~mask;		/* clear written bits */
+	val = tmp | (val << shift);
+	iowrite32(val, base + offset);
+	SPS_DBG(sps, "sps:bamdma: write reg 0x%x w_val 0x%x.", offset, val);
+}
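+
+/*
+ * Worked example of the read-modify-write above: for the
+ * DMA_CHNL_ACT_THRESH field (mask 0x30) the first set bit is bit 4,
+ * so writing BAM_DMA_THRESH_512 (0x3) lands in bits [5:4] while all
+ * other bits of the register are preserved.
+ */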
+
+/* Round max number of pipes down to an even number */
+#define DMA_MAX_PIPES         ((BAM_MAX_PIPES / 2) * 2)
+
+/* Maximum number of BAM-DMAs supported */
+#define MAX_BAM_DMA_DEVICES   1
+
+/* Maximum number of BAMs that will be registered */
+#define MAX_BAM_DMA_BAMS      1
+
+/* Pipe enable check values */
+#define DMA_PIPES_STATE_DIFF     0
+#define DMA_PIPES_BOTH_DISABLED  1
+#define DMA_PIPES_BOTH_ENABLED   2
+
+/* Even pipe is tx/dest/input/write, odd pipe is rx/src/output/read */
+#define DMA_PIPE_IS_DEST(p)   (((p) & 1) == 0)
+#define DMA_PIPE_IS_SRC(p)    (((p) & 1) != 0)
+
+/* BAM DMA pipe state */
+enum bamdma_pipe_state {
+	PIPE_INACTIVE = 0,
+	PIPE_ACTIVE
+};
+
+/* BAM DMA channel state */
+enum bamdma_chan_state {
+	DMA_CHAN_STATE_FREE = 0,
+	DMA_CHAN_STATE_ALLOC_EXT,	/* Client allocation */
+	DMA_CHAN_STATE_ALLOC_INT	/* Internal (resource mgr) allocation */
+};
+
+struct bamdma_chan {
+	/* Allocation state */
+	enum bamdma_chan_state state;
+
+	/* BAM DMA channel configuration parameters */
+	u32 threshold;
+	enum sps_dma_priority priority;
+
+	/* HWIO channel configuration parameters */
+	enum bam_dma_thresh_dma thresh;
+	enum bam_dma_weight_dma weight;
+
+};
+
+/* BAM DMA device state */
+struct bamdma_device {
+	/* BAM-DMA device state */
+	int enabled;
+	int local;
+
+	/* BAM device state */
+	struct sps_bam *bam;
+
+	/* BAM handle, for deregistration */
+	unsigned long h;
+
+	/* BAM DMA device virtual mapping */
+	void *virt_addr;
+	int virtual_mapped;
+	phys_addr_t phys_addr;
+	void *hwio;
+
+	/* BAM DMA pipe/channel state */
+	u32 num_pipes;
+	enum bamdma_pipe_state pipes[DMA_MAX_PIPES];
+	struct bamdma_chan chans[DMA_MAX_PIPES / 2];
+
+};
+
+/* BAM-DMA devices */
+static struct bamdma_device bam_dma_dev[MAX_BAM_DMA_DEVICES];
+static struct mutex bam_dma_lock;
+
+/*
+ * The BAM DMA module registers all BAMs in the BSP properties, but only
+ * uses the first BAM-DMA device for allocations.  References to the others
+ * are stored in the following data array.
+ */
+static int num_bams;
+static unsigned long bam_handles[MAX_BAM_DMA_BAMS];
+
+/**
+ * Find BAM-DMA device
+ *
+ * This function finds the BAM-DMA device associated with the BAM handle.
+ *
+ * @h - BAM handle
+ *
+ * @return - pointer to BAM-DMA device, or NULL on error
+ *
+ */
+static struct bamdma_device *sps_dma_find_device(unsigned long h)
+{
+	return &bam_dma_dev[0];
+}
+
+/**
+ * BAM DMA device enable
+ *
+ * This function enables a BAM DMA device and the associated BAM.
+ *
+ * @dev - pointer to BAM DMA device context
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+static int sps_dma_device_enable(struct bamdma_device *dev)
+{
+	if (dev->enabled)
+		return 0;
+
+	/*
+	 *  If the BAM-DMA device is locally controlled then enable BAM-DMA
+	 *  device
+	 */
+	if (dev->local)
+		dma_write_reg(dev->virt_addr, DMA_ENBL, 1);
+
+	/* Enable BAM device */
+	if (sps_bam_enable(dev->bam)) {
+		SPS_ERR(sps, "sps:Failed to enable BAM DMA's BAM: %pa",
+			&dev->phys_addr);
+		return SPS_ERROR;
+	}
+
+	dev->enabled = true;
+
+	return 0;
+}
+
+/**
+ * BAM DMA device disable
+ *
+ * This function disables a BAM DMA device and the associated BAM.
+ *
+ * @dev - pointer to BAM DMA device context
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+static int sps_dma_device_disable(struct bamdma_device *dev)
+{
+	u32 pipe_index;
+
+	if (!dev->enabled)
+		return 0;
+
+	/* Do not disable if channels active */
+	for (pipe_index = 0; pipe_index < dev->num_pipes; pipe_index++) {
+		if (dev->pipes[pipe_index] != PIPE_INACTIVE)
+			break;
+	}
+
+	if (pipe_index < dev->num_pipes) {
+		SPS_ERR(sps,
+			"sps:Fail to disable BAM-DMA %pa:channels are active",
+			&dev->phys_addr);
+		return SPS_ERROR;
+	}
+
+	dev->enabled = false;
+
+	/* Disable BAM device */
+	if (sps_bam_disable(dev->bam)) {
+		SPS_ERR(sps,
+			"sps:Fail to disable BAM-DMA BAM:%pa", &dev->phys_addr);
+		return SPS_ERROR;
+	}
+
+	/* Is the BAM-DMA device locally controlled? */
+	if (dev->local)
+		/* Disable BAM-DMA device */
+		dma_write_reg(dev->virt_addr, DMA_ENBL, 0);
+
+	return 0;
+}
+
+/**
+ * Initialize BAM DMA device
+ *
+ */
+int sps_dma_device_init(unsigned long h)
+{
+	struct bamdma_device *dev;
+	struct sps_bam_props *props;
+	int result = SPS_ERROR;
+
+	mutex_lock(&bam_dma_lock);
+
+	/* Find a free BAM-DMA device slot */
+	dev = NULL;
+	if (bam_dma_dev[0].bam != NULL) {
+		SPS_ERR(sps,
+			"sps:%s:BAM-DMA BAM device is already initialized.",
+			__func__);
+		goto exit_err;
+	} else {
+		dev = &bam_dma_dev[0];
+	}
+
+	/* Record BAM */
+	memset(dev, 0, sizeof(*dev));
+	dev->h = h;
+	dev->bam = sps_h2bam(h);
+
+	if (dev->bam == NULL) {
+		SPS_ERR(sps,
+			"sps:%s:BAM-DMA BAM device is not found from the handle.",
+			__func__);
+		goto exit_err;
+	}
+
+	/* Map the BAM DMA device into virtual space, if necessary */
+	props = &dev->bam->props;
+	dev->phys_addr = props->periph_phys_addr;
+	if (props->periph_virt_addr != NULL) {
+		dev->virt_addr = props->periph_virt_addr;
+		dev->virtual_mapped = false;
+	} else {
+		if (props->periph_virt_size == 0) {
+			SPS_ERR(sps,
+				"sps:Unable to map BAM DMA IO memory: %pa %x",
+				&dev->phys_addr, props->periph_virt_size);
+			goto exit_err;
+		}
+
+		dev->virt_addr = ioremap(dev->phys_addr,
+					  props->periph_virt_size);
+		if (dev->virt_addr == NULL) {
+			SPS_ERR(sps,
+				"sps:Unable to map BAM DMA IO memory: %pa %x",
+				&dev->phys_addr, props->periph_virt_size);
+			goto exit_err;
+		}
+		dev->virtual_mapped = true;
+	}
+	dev->hwio = (void *) dev->virt_addr;
+
+	/* Is the BAM-DMA device locally controlled? */
+	if ((props->manage & SPS_BAM_MGR_DEVICE_REMOTE) == 0) {
+		SPS_DBG3(sps, "sps:BAM-DMA is controlled locally: %pa",
+			&dev->phys_addr);
+		dev->local = true;
+	} else {
+		SPS_DBG3(sps, "sps:BAM-DMA is controlled remotely: %pa",
+			&dev->phys_addr);
+		dev->local = false;
+	}
+
+	/*
+	 * Enable the BAM DMA and determine the number of pipes/channels.
+	 * Leave the BAM-DMA enabled, since it is always a shared device.
+	 */
+	if (sps_dma_device_enable(dev))
+		goto exit_err;
+
+	dev->num_pipes = dev->bam->props.num_pipes;
+
+	result = 0;
+exit_err:
+	if (result) {
+		if (dev != NULL) {
+			if (dev->virtual_mapped)
+				iounmap(dev->virt_addr);
+
+			dev->bam = NULL;
+		}
+	}
+
+	mutex_unlock(&bam_dma_lock);
+
+	return result;
+}
+
+/**
+ * De-initialize BAM DMA device
+ *
+ */
+int sps_dma_device_de_init(unsigned long h)
+{
+	struct bamdma_device *dev;
+	u32 pipe_index;
+	u32 chan;
+	int result = 0;
+
+	mutex_lock(&bam_dma_lock);
+
+	dev = sps_dma_find_device(h);
+	if (dev == NULL) {
+		SPS_ERR(sps, "sps:BAM-DMA: not registered: %lx", h);
+		result = SPS_ERROR;
+		goto exit_err;
+	}
+
+	/* Check for channel leaks */
+	for (chan = 0; chan < dev->num_pipes / 2; chan++) {
+		if (dev->chans[chan].state != DMA_CHAN_STATE_FREE) {
+			SPS_ERR(sps, "sps:BAM-DMA: channel not free: %d", chan);
+			result = SPS_ERROR;
+			dev->chans[chan].state = DMA_CHAN_STATE_FREE;
+		}
+	}
+	for (pipe_index = 0; pipe_index < dev->num_pipes; pipe_index++) {
+		if (dev->pipes[pipe_index] != PIPE_INACTIVE) {
+			SPS_ERR(sps, "sps:BAM-DMA: pipe not inactive: %d",
+					pipe_index);
+			result = SPS_ERROR;
+			dev->pipes[pipe_index] = PIPE_INACTIVE;
+		}
+	}
+
+	/* Disable BAM and BAM-DMA */
+	if (sps_dma_device_disable(dev))
+		result = SPS_ERROR;
+
+	dev->h = BAM_HANDLE_INVALID;
+	dev->bam = NULL;
+	if (dev->virtual_mapped)
+		iounmap(dev->virt_addr);
+
+exit_err:
+	mutex_unlock(&bam_dma_lock);
+
+	return result;
+}
+
+/**
+ * Initialize BAM DMA module
+ *
+ */
+int sps_dma_init(const struct sps_bam_props *bam_props)
+{
+	struct sps_bam_props props;
+	const struct sps_bam_props *bam_reg;
+	unsigned long h;
+
+	/* Init local data */
+	memset(&bam_dma_dev, 0, sizeof(bam_dma_dev));
+	num_bams = 0;
+	memset(bam_handles, 0, sizeof(bam_handles));
+
+	/* Create a mutex to control access to the BAM-DMA devices */
+	mutex_init(&bam_dma_lock);
+
+	/* Are there any BAM DMA devices? */
+	if (bam_props == NULL)
+		return 0;
+
+	/*
+	 * Registers all BAMs in the BSP properties, but only uses the first
+	 * BAM-DMA device for allocations.
+	 */
+	if (bam_props->phys_addr) {
+		/* Force multi-EE option for all BAM-DMAs */
+		bam_reg = bam_props;
+		if ((bam_props->options & SPS_BAM_OPT_BAMDMA) &&
+		    (bam_props->manage & SPS_BAM_MGR_MULTI_EE) == 0) {
+			SPS_DBG(sps,
+				"sps:Setting multi-EE options for BAM-DMA: %pa",
+				&bam_props->phys_addr);
+			props = *bam_props;
+			props.manage |= SPS_BAM_MGR_MULTI_EE;
+			bam_reg = &props;
+		}
+
+		/* Register the BAM */
+		if (sps_register_bam_device(bam_reg, &h)) {
+			SPS_ERR(sps,
+				"sps:Fail to register BAM-DMA BAM device: phys %pa",
+				&bam_props->phys_addr);
+			return SPS_ERROR;
+		}
+
+		/* Record the BAM so that it may be deregistered later */
+		if (num_bams < MAX_BAM_DMA_BAMS) {
+			bam_handles[num_bams] = h;
+			num_bams++;
+		} else {
+			SPS_ERR(sps, "sps:BAM-DMA: BAM limit exceeded: %d",
+					num_bams);
+			return SPS_ERROR;
+		}
+	} else {
+		SPS_ERR(sps,
+			"sps:%s:BAM-DMA phys_addr is zero.",
+			__func__);
+		return SPS_ERROR;
+	}
+
+	return 0;
+}
+
+/**
+ * De-initialize BAM DMA module
+ *
+ */
+void sps_dma_de_init(void)
+{
+	int n;
+
+	/* De-initialize the BAM devices */
+	for (n = 0; n < num_bams; n++)
+		sps_deregister_bam_device(bam_handles[n]);
+
+	/* Clear local data */
+	memset(&bam_dma_dev, 0, sizeof(bam_dma_dev));
+	num_bams = 0;
+	memset(bam_handles, 0, sizeof(bam_handles));
+}
+
+/**
+ * Allocate a BAM DMA channel
+ *
+ */
+int sps_alloc_dma_chan(const struct sps_alloc_dma_chan *alloc,
+		       struct sps_dma_chan *chan_info)
+{
+	struct bamdma_device *dev;
+	struct bamdma_chan *chan;
+	u32 pipe_index;
+	enum bam_dma_thresh_dma thresh = (enum bam_dma_thresh_dma) 0;
+	enum bam_dma_weight_dma weight = (enum bam_dma_weight_dma) 0;
+	int result = SPS_ERROR;
+
+	if (alloc == NULL || chan_info == NULL) {
+		SPS_ERR(sps,
+			"sps:%s:invalid parameters", __func__);
+		return SPS_ERROR;
+	}
+
+	/* Translate threshold and priority to hwio values */
+	if (alloc->threshold != SPS_DMA_THRESHOLD_DEFAULT) {
+		if (alloc->threshold >= 512)
+			thresh = BAM_DMA_THRESH_512;
+		else if (alloc->threshold >= 256)
+			thresh = BAM_DMA_THRESH_256;
+		else if (alloc->threshold >= 128)
+			thresh = BAM_DMA_THRESH_128;
+		else
+			thresh = BAM_DMA_THRESH_64;
+	}
+
+	weight = alloc->priority;
+
+	if ((u32)alloc->priority > (u32)BAM_DMA_WEIGHT_HIGH) {
+		SPS_ERR(sps, "sps:BAM-DMA: invalid priority: %x",
+						alloc->priority);
+		return SPS_ERROR;
+	}
+
+	mutex_lock(&bam_dma_lock);
+
+	dev = sps_dma_find_device(alloc->dev);
+	if (dev == NULL) {
+		SPS_ERR(sps, "sps:BAM-DMA: invalid BAM handle: %lx",
+							alloc->dev);
+		goto exit_err;
+	}
+
+	/* Search for a free set of pipes */
+	for (pipe_index = 0, chan = dev->chans;
+	      pipe_index < dev->num_pipes; pipe_index += 2, chan++) {
+		if (chan->state == DMA_CHAN_STATE_FREE) {
+			/* Just check pipes for safety */
+			if (dev->pipes[pipe_index] != PIPE_INACTIVE ||
+			    dev->pipes[pipe_index + 1] != PIPE_INACTIVE) {
+				SPS_ERR(sps,
+					"sps:BAM-DMA: channel %d state error:%d %d",
+					pipe_index / 2, dev->pipes[pipe_index],
+				 dev->pipes[pipe_index + 1]);
+				goto exit_err;
+			}
+			break; /* Found free pipe */
+		}
+	}
+
+	if (pipe_index >= dev->num_pipes) {
+		SPS_ERR(sps, "sps:BAM-DMA: no free channel. num_pipes = %d",
+			dev->num_pipes);
+		goto exit_err;
+	}
+
+	chan->state = DMA_CHAN_STATE_ALLOC_EXT;
+
+	/* Store config values for use when pipes are activated */
+	chan = &dev->chans[pipe_index / 2];
+	chan->threshold = alloc->threshold;
+	chan->thresh = thresh;
+	chan->priority = alloc->priority;
+	chan->weight = weight;
+
+	SPS_DBG3(sps, "sps:sps_alloc_dma_chan. pipe %d.\n", pipe_index);
+
+	/* Report allocated pipes to client */
+	chan_info->dev = dev->h;
+	/* Dest/input/write pipe */
+	chan_info->dest_pipe_index = pipe_index;
+	/* Source/output/read pipe */
+	chan_info->src_pipe_index = pipe_index + 1;
+
+	result = 0;
+exit_err:
+	mutex_unlock(&bam_dma_lock);
+
+	return result;
+}
+EXPORT_SYMBOL(sps_alloc_dma_chan);
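+
+/*
+ * Illustrative sketch (hypothetical caller): allocating a channel with
+ * the default threshold and priority, which this function special-cases.
+ * The handle h would come from sps_dma_get_bam_handle().
+ */
+static int example_get_channel(unsigned long h, struct sps_dma_chan *chan)
+{
+	struct sps_alloc_dma_chan alloc = {
+		.dev = h,
+		.threshold = SPS_DMA_THRESHOLD_DEFAULT,
+		.priority = SPS_DMA_PRI_DEFAULT,
+	};
+
+	return sps_alloc_dma_chan(&alloc, chan);
+}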
+
+/**
+ * Free a BAM DMA channel
+ *
+ */
+int sps_free_dma_chan(struct sps_dma_chan *chan)
+{
+	struct bamdma_device *dev;
+	u32 pipe_index;
+	int result = 0;
+
+	if (chan == NULL) {
+		SPS_ERR(sps,
+			"sps:%s:chan is NULL", __func__);
+		return SPS_ERROR;
+	}
+
+	mutex_lock(&bam_dma_lock);
+
+	dev = sps_dma_find_device(chan->dev);
+	if (dev == NULL) {
+		SPS_ERR(sps, "sps:BAM-DMA: invalid BAM handle: %lx", chan->dev);
+		result = SPS_ERROR;
+		goto exit_err;
+	}
+
+	/* Verify the pipe indices */
+	pipe_index = chan->dest_pipe_index;
+	if (pipe_index >= dev->num_pipes || ((pipe_index & 1)) ||
+	    (pipe_index + 1) != chan->src_pipe_index) {
+		SPS_ERR(sps,
+			"sps:sps_free_dma_chan. Invalid pipe indices. num_pipes=%d.dest=%d.src=%d.",
+			dev->num_pipes,
+			chan->dest_pipe_index,
+			chan->src_pipe_index);
+		result = SPS_ERROR;
+		goto exit_err;
+	}
+
+	/* Are both pipes inactive? */
+	if (dev->chans[pipe_index / 2].state != DMA_CHAN_STATE_ALLOC_EXT ||
+	    dev->pipes[pipe_index] != PIPE_INACTIVE ||
+	    dev->pipes[pipe_index + 1] != PIPE_INACTIVE) {
+		SPS_ERR(sps,
+			"sps:BAM-DMA: attempt to free active chan %d: %d %d",
+			pipe_index / 2, dev->pipes[pipe_index],
+			dev->pipes[pipe_index + 1]);
+		result = SPS_ERROR;
+		goto exit_err;
+	}
+
+	/* Free the channel */
+	dev->chans[pipe_index / 2].state = DMA_CHAN_STATE_FREE;
+
+exit_err:
+	mutex_unlock(&bam_dma_lock);
+
+	return result;
+}
+EXPORT_SYMBOL(sps_free_dma_chan);
+
+/**
+ * Check the state of a BAM DMA pipe pair
+ *
+ * This function checks whether the two pipes of a BAM DMA channel are
+ * both enabled, both disabled, or in differing states.
+ *
+ * @dev - pointer to BAM-DMA device descriptor
+ *
+ * @pipe_index - index of either pipe in the pair
+ *
+ * @return one of DMA_PIPES_BOTH_ENABLED, DMA_PIPES_BOTH_DISABLED,
+ *    or DMA_PIPES_STATE_DIFF
+ *
+ */
+static u32 sps_dma_check_pipes(struct bamdma_device *dev, u32 pipe_index)
+{
+	u32 pipe_in;
+	u32 pipe_out;
+	int enabled_in;
+	int enabled_out;
+	u32 check;
+
+	pipe_in = pipe_index & ~1;
+	pipe_out = pipe_in + 1;
+	enabled_in = bam_pipe_is_enabled(&dev->bam->base, pipe_in);
+	enabled_out = bam_pipe_is_enabled(&dev->bam->base, pipe_out);
+
+	if (!enabled_in && !enabled_out)
+		check = DMA_PIPES_BOTH_DISABLED;
+	else if (enabled_in && enabled_out)
+		check = DMA_PIPES_BOTH_ENABLED;
+	else
+		check = DMA_PIPES_STATE_DIFF;
+
+	return check;
+}
+
+/**
+ * Allocate a BAM DMA pipe
+ *
+ */
+int sps_dma_pipe_alloc(void *bam_arg, u32 pipe_index, enum sps_mode dir)
+{
+	struct sps_bam *bam = bam_arg;
+	struct bamdma_device *dev;
+	struct bamdma_chan *chan;
+	u32 channel;
+	int result = SPS_ERROR;
+
+	if (bam == NULL) {
+		SPS_ERR(sps, "%s", "sps:BAM context is NULL");
+		return SPS_ERROR;
+	}
+
+	/* Check pipe direction */
+	if ((DMA_PIPE_IS_DEST(pipe_index) && dir != SPS_MODE_DEST) ||
+	    (DMA_PIPE_IS_SRC(pipe_index) && dir != SPS_MODE_SRC)) {
+		SPS_ERR(sps, "sps:BAM-DMA: wrong direction for BAM %pa pipe %d",
+			&bam->props.phys_addr, pipe_index);
+		return SPS_ERROR;
+	}
+
+	mutex_lock(&bam_dma_lock);
+
+	dev = sps_dma_find_device((unsigned long) bam);
+	if (dev == NULL) {
+		SPS_ERR(sps, "sps:BAM-DMA: invalid BAM: %pa",
+			&bam->props.phys_addr);
+		goto exit_err;
+	}
+	if (pipe_index >= dev->num_pipes) {
+		SPS_ERR(sps, "sps:BAM-DMA: BAM %pa invalid pipe: %d",
+			&bam->props.phys_addr, pipe_index);
+		goto exit_err;
+	}
+	if (dev->pipes[pipe_index] != PIPE_INACTIVE) {
+		SPS_ERR(sps, "sps:BAM-DMA: BAM %pa pipe %d already active",
+			&bam->props.phys_addr, pipe_index);
+		goto exit_err;
+	}
+
+	/* Mark pipe active */
+	dev->pipes[pipe_index] = PIPE_ACTIVE;
+
+	/* If channel is not allocated, make an internal allocation */
+	channel = pipe_index / 2;
+	chan = &dev->chans[channel];
+	if (chan->state != DMA_CHAN_STATE_ALLOC_EXT &&
+	    chan->state != DMA_CHAN_STATE_ALLOC_INT) {
+		chan->state = DMA_CHAN_STATE_ALLOC_INT;
+	}
+
+	result = 0;
+exit_err:
+	mutex_unlock(&bam_dma_lock);
+
+	return result;
+}
+
+/**
+ * Enable a BAM DMA pipe
+ *
+ */
+int sps_dma_pipe_enable(void *bam_arg, u32 pipe_index)
+{
+	struct sps_bam *bam = bam_arg;
+	struct bamdma_device *dev;
+	struct bamdma_chan *chan;
+	u32 channel;
+	int result = SPS_ERROR;
+
+	SPS_DBG3(sps, "sps:sps_dma_pipe_enable.pipe %d", pipe_index);
+
+	mutex_lock(&bam_dma_lock);
+
+	dev = sps_dma_find_device((unsigned long) bam);
+	if (dev == NULL) {
+		SPS_ERR(sps, "sps:%s:BAM-DMA: invalid BAM", __func__);
+		goto exit_err;
+	}
+	if (pipe_index >= dev->num_pipes) {
+		SPS_ERR(sps, "sps:BAM-DMA: BAM %pa invalid pipe: %d",
+			&bam->props.phys_addr, pipe_index);
+		goto exit_err;
+	}
+	if (dev->pipes[pipe_index] != PIPE_ACTIVE) {
+		SPS_ERR(sps, "sps:BAM-DMA: BAM %pa pipe %d not active",
+			&bam->props.phys_addr, pipe_index);
+		goto exit_err;
+	}
+
+	/*
+	 * The channel must be enabled when the dest/input/write pipe
+	 * is enabled.
+	 */
+	if (DMA_PIPE_IS_DEST(pipe_index)) {
+		/* Configure and enable the channel */
+		channel = pipe_index / 2;
+		chan = &dev->chans[channel];
+
+		if (chan->threshold != SPS_DMA_THRESHOLD_DEFAULT)
+			dma_write_reg_field(dev->virt_addr,
+					    DMA_CHNL_CONFIG(channel),
+					    DMA_CHNL_ACT_THRESH,
+					    chan->thresh);
+
+		if (chan->priority != SPS_DMA_PRI_DEFAULT)
+			dma_write_reg_field(dev->virt_addr,
+					    DMA_CHNL_CONFIG(channel),
+					    DMA_CHNL_WEIGHT,
+					    chan->weight);
+
+		dma_write_reg_field(dev->virt_addr,
+				    DMA_CHNL_CONFIG(channel),
+				    DMA_CHNL_ENABLE, 1);
+	}
+
+	result = 0;
+exit_err:
+	mutex_unlock(&bam_dma_lock);
+
+	return result;
+}
+
+/**
+ * Deactivate a BAM DMA pipe
+ *
+ * This function deactivates a BAM DMA pipe.
+ *
+ * @dev - pointer to BAM-DMA device descriptor
+ *
+ * @bam - pointer to BAM device descriptor
+ *
+ * @pipe_index - pipe index
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+static int sps_dma_deactivate_pipe_atomic(struct bamdma_device *dev,
+					  struct sps_bam *bam,
+					  u32 pipe_index)
+{
+	u32 channel;
+
+	if (dev->bam != bam)
+		return SPS_ERROR;
+	if (pipe_index >= dev->num_pipes)
+		return SPS_ERROR;
+	if (dev->pipes[pipe_index] != PIPE_ACTIVE)
+		return SPS_ERROR;	/* Pipe is not active */
+
+	SPS_DBG3(sps, "sps:BAM-DMA: deactivate pipe %d", pipe_index);
+
+	/* Mark pipe inactive */
+	dev->pipes[pipe_index] = PIPE_INACTIVE;
+
+	/*
+	 * Channel must be reset when either pipe is disabled, so just always
+	 * reset regardless of other pipe's state
+	 */
+	channel = pipe_index / 2;
+	dma_write_reg_field(dev->virt_addr, DMA_CHNL_CONFIG(channel),
+			    DMA_CHNL_ENABLE, 0);
+
+	/* If both pipes are now disabled, the channel can be freed */
+	if (sps_dma_check_pipes(dev, pipe_index) == DMA_PIPES_BOTH_DISABLED) {
+		/* Free channel if allocated internally */
+		if (dev->chans[channel].state == DMA_CHAN_STATE_ALLOC_INT)
+			dev->chans[channel].state = DMA_CHAN_STATE_FREE;
+	}
+
+	return 0;
+}
+
+/**
+ * Free a BAM DMA pipe
+ *
+ */
+int sps_dma_pipe_free(void *bam_arg, u32 pipe_index)
+{
+	struct bamdma_device *dev;
+	struct sps_bam *bam = bam_arg;
+	int result;
+
+	mutex_lock(&bam_dma_lock);
+
+	dev = sps_dma_find_device((unsigned long) bam);
+	if (dev == NULL) {
+		SPS_ERR(sps, "sps:%s:BAM-DMA: invalid BAM", __func__);
+		result = SPS_ERROR;
+		goto exit_err;
+	}
+
+	result = sps_dma_deactivate_pipe_atomic(dev, bam, pipe_index);
+
+exit_err:
+	mutex_unlock(&bam_dma_lock);
+
+	return result;
+}
+
+/**
+ * Get the BAM handle for BAM-DMA.
+ *
+ * The BAM handle should be used as the source/destination in sps_connect().
+ *
+ * @return bam handle on success, zero on error
+ */
+unsigned long sps_dma_get_bam_handle(void)
+{
+	return (unsigned long)bam_dma_dev[0].bam;
+}
+EXPORT_SYMBOL(sps_dma_get_bam_handle);
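+
+/*
+ * Illustrative sketch (hypothetical caller): using the BAM-DMA handle
+ * as one end of a connection, per the comment above.  Only the source
+ * assignment is shown; a real caller must still fill in the
+ * destination, pipe indices and FIFOs before calling sps_connect().
+ */
+static inline void example_use_dma_handle(struct sps_connect *connect)
+{
+	connect->source = sps_dma_get_bam_handle();
+}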
+
+/**
+ * Free the BAM handle for BAM-DMA.
+ *
+ */
+void sps_dma_free_bam_handle(unsigned long h)
+{
+}
+EXPORT_SYMBOL(sps_dma_free_bam_handle);
+
+#endif /* CONFIG_SPS_SUPPORT_BAMDMA */
diff --git a/drivers/platform/msm/sps/sps_map.c b/drivers/platform/msm/sps/sps_map.c
new file mode 100644
index 0000000..901d9cc
--- /dev/null
+++ b/drivers/platform/msm/sps/sps_map.c
@@ -0,0 +1,140 @@
+/* Copyright (c) 2011-2013, 2015, 2017. The Linux Foundation. All rights
+ * reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/**
+ * Connection mapping table management for SPS device driver.
+ */
+
+#include <linux/types.h>	/* u32 */
+#include <linux/kernel.h>	/* pr_info() */
+#include <linux/memory.h>	/* memset */
+
+#include "spsi.h"
+
+/* Module state */
+struct sps_map_state {
+	const struct sps_map *maps;
+	u32 num_maps;
+	u32 options;
+};
+
+static struct sps_map_state sps_maps;
+
+/**
+ * Initialize connection mapping module
+ *
+ */
+int sps_map_init(const struct sps_map *map_props, u32 options)
+{
+	const struct sps_map *maps;
+
+	/* Are there any connection mappings? */
+	memset(&sps_maps, 0, sizeof(sps_maps));
+	if (map_props == NULL)
+		return 0;
+
+	/* Init the module state */
+	sps_maps.maps = map_props;
+	sps_maps.options = options;
+	for (maps = sps_maps.maps;; maps++, sps_maps.num_maps++)
+		if (maps->src.periph_class == SPS_CLASS_INVALID &&
+		    maps->src.periph_phy_addr == SPS_ADDR_INVALID)
+			break;
+
+	SPS_DBG(sps, "sps: %d mappings", sps_maps.num_maps);
+
+	return 0;
+}
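+
+/*
+ * Illustrative sketch (hypothetical BSP data): the terminator that the
+ * loop above searches for.  A platform's map table must end with an
+ * entry whose source class and physical address are both invalid.
+ */
+static const struct sps_map example_maps[] = {
+	/* ... real platform mappings would precede the terminator ... */
+	{
+		.src = {
+			.periph_class = SPS_CLASS_INVALID,
+			.periph_phy_addr = SPS_ADDR_INVALID,
+		},
+	},
+};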
+
+/**
+ * De-initialize connection mapping module
+ *
+ */
+void sps_map_de_init(void)
+{
+	memset(&sps_maps, 0, sizeof(sps_maps));
+}
+
+/**
+ * Find matching connection mapping
+ *
+ */
+int sps_map_find(struct sps_connect *connect)
+{
+	const struct sps_map *map;
+	u32 i;
+	void *desc;
+	void *data;
+
+	/* Are there any connection mappings? */
+	if (sps_maps.num_maps == 0)
+		return SPS_ERROR;
+
+	/* Search the mapping table for a match to the specified connection */
+	for (i = sps_maps.num_maps, map = sps_maps.maps;
+	    i > 0; i--, map++)
+		if (map->src.periph_class == (u32) connect->source &&
+		    map->dest.periph_class == (u32) connect->destination
+		    && map->config == (u32) connect->config)
+			break;
+
+	if (i == 0)
+		return SPS_ERROR;
+
+	/*
+	 * Before modifying client parameter struct, perform all
+	 * operations that might fail
+	 */
+	desc = spsi_get_mem_ptr(map->desc_base);
+	if (desc == NULL) {
+		SPS_ERR(sps,
+			"sps:Cannot get virt addr for I/O buffer: %pa\n",
+			&map->desc_base);
+		return SPS_ERROR;
+	}
+
+	if (map->data_size > 0 && map->data_base != SPS_ADDR_INVALID) {
+		data = spsi_get_mem_ptr(map->data_base);
+		if (data == NULL) {
+			SPS_ERR(sps,
+				"sps:Can't get virt addr for I/O buffer: %pa",
+				&map->data_base);
+			return SPS_ERROR;
+		}
+	} else {
+		data = NULL;
+	}
+
+	/* Copy mapping values to client parameter struct */
+	if (connect->source != SPS_DEV_HANDLE_MEM)
+		connect->src_pipe_index = map->src.pipe_index;
+
+	if (connect->destination != SPS_DEV_HANDLE_MEM)
+		connect->dest_pipe_index = map->dest.pipe_index;
+
+	if (connect->mode == SPS_MODE_SRC)
+		connect->event_thresh = map->src.event_thresh;
+	else
+		connect->event_thresh = map->dest.event_thresh;
+
+	connect->desc.size = map->desc_size;
+	connect->desc.phys_base = map->desc_base;
+	connect->desc.base = desc;
+	if (map->data_size > 0 && map->data_base != SPS_ADDR_INVALID) {
+		connect->data.size = map->data_size;
+		connect->data.phys_base = map->data_base;
+		connect->data.base = data;
+	}
+
+	return 0;
+}
diff --git a/drivers/platform/msm/sps/sps_map.h b/drivers/platform/msm/sps/sps_map.h
new file mode 100644
index 0000000..2c9f8e3
--- /dev/null
+++ b/drivers/platform/msm/sps/sps_map.h
@@ -0,0 +1,46 @@
+/* Copyright (c) 2011,2013, 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/* SPS driver mapping table data declarations. */
+
+#ifndef _SPS_MAP_H_
+#define _SPS_MAP_H_
+
+#include <linux/types.h>	/* u32 */
+
+/* End point parameters */
+struct sps_map_end_point {
+	u32 periph_class;	/* Peripheral device enumeration class */
+	phys_addr_t periph_phy_addr;	/* Peripheral base address */
+	u32 pipe_index;		/* Pipe index */
+	u32 event_thresh;	/* Pipe event threshold */
+};
+
+/* Mapping connection descriptor */
+struct sps_map {
+	/* Source end point parameters */
+	struct sps_map_end_point src;
+
+	/* Destination end point parameters */
+	struct sps_map_end_point dest;
+
+	/* Resource parameters */
+	u32 config;	 /* Configuration (stream) identifier */
+	phys_addr_t desc_base;	 /* Physical address of descriptor FIFO */
+	u32 desc_size;	 /* Size (bytes) of descriptor FIFO */
+	phys_addr_t data_base;	 /* Physical address of data FIFO */
+	u32 data_size;	 /* Size (bytes) of data FIFO */
+
+};
+
+#endif /* _SPS_MAP_H_ */
diff --git a/drivers/platform/msm/sps/sps_mem.c b/drivers/platform/msm/sps/sps_mem.c
new file mode 100644
index 0000000..16556bd
--- /dev/null
+++ b/drivers/platform/msm/sps/sps_mem.c
@@ -0,0 +1,172 @@
+/* Copyright (c) 2011-2013, 2015, 2017, The Linux Foundation. All rights
+ * reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/**
+ * Pipe-Memory allocation/free management.
+ */
+
+#include <linux/types.h>	/* u32 */
+#include <linux/kernel.h>	/* pr_info() */
+#include <linux/io.h>		/* ioremap() */
+#include <linux/mutex.h>	/* mutex */
+#include <linux/list.h>		/* list_head */
+#include <linux/genalloc.h>	/* gen_pool_alloc() */
+#include <linux/errno.h>	/* ENOMEM */
+
+#include "sps_bam.h"
+#include "spsi.h"
+
+static phys_addr_t iomem_phys;
+static void *iomem_virt;
+static u32 iomem_size;
+static u32 iomem_offset;
+static struct gen_pool *pool;
+static u32 nid = 0xaa;
+
+/* Debug */
+static u32 total_alloc;
+static u32 total_free;
+
+/**
+ * Translate physical to virtual address
+ *
+ */
+void *spsi_get_mem_ptr(phys_addr_t phys_addr)
+{
+	void *virt = NULL;
+
+	if ((phys_addr >= iomem_phys) &&
+	    (phys_addr < (iomem_phys + iomem_size))) {
+		virt = (u8 *) iomem_virt + (phys_addr - iomem_phys);
+	} else {
+		virt = phys_to_virt(phys_addr);
+		SPS_ERR(sps, "sps:spsi_get_mem_ptr.invalid phys addr=0x%pa.",
+			&phys_addr);
+	}
+	return virt;
+}
+
+/**
+ * Allocate I/O (pipe) memory
+ *
+ */
+phys_addr_t sps_mem_alloc_io(u32 bytes)
+{
+	phys_addr_t phys_addr = SPS_ADDR_INVALID;
+	unsigned long virt_addr = 0;
+
+	virt_addr = gen_pool_alloc(pool, bytes);
+	if (virt_addr) {
+		iomem_offset = virt_addr - (uintptr_t) iomem_virt;
+		phys_addr = iomem_phys + iomem_offset;
+		total_alloc += bytes;
+	} else {
+		SPS_ERR(sps, "sps:gen_pool_alloc %d bytes fail.", bytes);
+		return SPS_ADDR_INVALID;
+	}
+
+	SPS_DBG3(sps, "sps:sps_mem_alloc_io.phys=%pa.virt=0x%lx.size=0x%x.",
+		&phys_addr, virt_addr, bytes);
+
+	return phys_addr;
+}
+
+/**
+ * Free I/O memory
+ *
+ */
+void sps_mem_free_io(phys_addr_t phys_addr, u32 bytes)
+{
+	unsigned long virt_addr = 0;
+
+	iomem_offset = phys_addr - iomem_phys;
+	virt_addr = (uintptr_t) iomem_virt + iomem_offset;
+
+	SPS_DBG3(sps, "sps:sps_mem_free_io.phys=%pa.virt=0x%lx.size=0x%x.",
+		&phys_addr, virt_addr, bytes);
+
+	gen_pool_free(pool, virt_addr, bytes);
+	total_free += bytes;
+}
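+
+/*
+ * Usage sketch (illustrative only): callers pair the allocator with
+ * spsi_get_mem_ptr() to obtain a CPU pointer to a pipe-memory FIFO,
+ * then free with the same size that was allocated.
+ *
+ *	phys_addr_t fifo_phys = sps_mem_alloc_io(0x400);
+ *
+ *	if (fifo_phys != SPS_ADDR_INVALID) {
+ *		void *fifo_virt = spsi_get_mem_ptr(fifo_phys);
+ *		... fill the FIFO through fifo_virt ...
+ *		sps_mem_free_io(fifo_phys, 0x400);
+ *	}
+ */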
+
+/**
+ * Initialize driver memory module
+ *
+ */
+int sps_mem_init(phys_addr_t pipemem_phys_base, u32 pipemem_size)
+{
+	int res;
+
+	/* 2^8 = 256 bytes: the minimum desc-FIFO/data-FIFO allocation. */
+	int min_alloc_order = 8;
+
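+	/* Map and pool pipe memory only for configurations that provide it */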
+	if ((d_type == 0) || (d_type == 2) || imem) {
+		iomem_phys = pipemem_phys_base;
+		iomem_size = pipemem_size;
+
+		if (iomem_phys == 0) {
+			SPS_ERR(sps, "sps:%s:Invalid Pipe-Mem address",
+				__func__);
+			return SPS_ERROR;
+		}
+		iomem_virt = ioremap(iomem_phys, iomem_size);
+		if (!iomem_virt) {
+			SPS_ERR(sps,
+				"sps:%s:Failed to IO map pipe memory.\n",
+					__func__);
+			return -ENOMEM;
+		}
+
+		iomem_offset = 0;
+		SPS_DBG(sps,
+			"sps:sps_mem_init.iomem_phys=%pa,iomem_virt=0x%p.",
+			&iomem_phys, iomem_virt);
+	}
+
+	pool = gen_pool_create(min_alloc_order, nid);
+
+	if (!pool) {
+		SPS_ERR(sps, "sps:%s:Failed to create a new memory pool.\n",
+								__func__);
+		return -ENOMEM;
+	}
+
+	if ((d_type == 0) || (d_type == 2) || imem) {
+		res = gen_pool_add(pool, (uintptr_t)iomem_virt,
+				iomem_size, nid);
+		if (res)
+			return res;
+	}
+
+	return 0;
+}
+
+/**
+ * De-initialize driver memory module
+ *
+ */
+int sps_mem_de_init(void)
+{
+	if (iomem_virt != NULL) {
+		gen_pool_destroy(pool);
+		pool = NULL;
+		iounmap(iomem_virt);
+		iomem_virt = NULL;
+	}
+
+	if (total_alloc == total_free)
+		return 0;
+
+	SPS_ERR(sps, "sps:%s:some memory not free", __func__);
+	return SPS_ERROR;
+}
diff --git a/drivers/platform/msm/sps/sps_rm.c b/drivers/platform/msm/sps/sps_rm.c
new file mode 100644
index 0000000..602a256
--- /dev/null
+++ b/drivers/platform/msm/sps/sps_rm.c
@@ -0,0 +1,854 @@
+/* Copyright (c) 2011-2015, 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/* Resource management for the SPS device driver. */
+
+#include <linux/types.h>	/* u32 */
+#include <linux/kernel.h>	/* pr_info() */
+#include <linux/mutex.h>	/* mutex */
+#include <linux/list.h>		/* list_head */
+#include <linux/slab.h>		/* kzalloc() */
+#include <linux/memory.h>	/* memset */
+#include <linux/interrupt.h>
+
+#include "spsi.h"
+#include "sps_core.h"
+
+/* Max BAM FIFO sizes */
+#define SPSRM_MAX_DESC_FIFO_SIZE    0xffff
+#define SPSRM_MAX_DATA_FIFO_SIZE    0xffff
+
+/* Connection control struct pointer */
+static struct sps_rm *sps_rm;
+
+/**
+ * Initialize resource manager module
+ */
+int sps_rm_init(struct sps_rm *rm, u32 options)
+{
+	/* Set the resource manager state struct pointer */
+	sps_rm = rm;
+
+	/* Initialize the state struct */
+	INIT_LIST_HEAD(&sps_rm->connections_q);
+	mutex_init(&sps_rm->lock);
+
+	return 0;
+}
+
+/**
+ * Initialize client state context
+ *
+ */
+void sps_rm_config_init(struct sps_connect *connect)
+{
+	memset(connect, SPSRM_CLEAR, sizeof(*connect));
+}
+
+/**
+ * Remove reference to connection mapping
+ *
+ * This function removes a reference from a connection mapping struct.
+ *
+ * @map - pointer to connection mapping struct
+ *
+ */
+static void sps_rm_remove_ref(struct sps_connection *map)
+{
+	/* Free this connection */
+	map->refs--;
+	if (map->refs <= 0) {
+		if (map->client_src != NULL || map->client_dest != NULL)
+			SPS_ERR(sps,
+				"sps:%s:Failed to allocate connection struct",
+				__func__);
+
+		list_del(&map->list);
+		kfree(map);
+	}
+}
+
+/**
+ * Compare map to connect parameters
+ *
+ * This function compares client connect parameters to an allocated
+ * connection mapping.
+ *
+ * @cfg - client connect parameters
+ *
+ * @map - connection mapping to compare against
+ *
+ * @return - true if match, false otherwise
+ *
+ */
+static int sps_rm_map_match(const struct sps_connect *cfg,
+			    const struct sps_connection *map)
+{
+	if (cfg->source != map->src.dev ||
+	    cfg->destination != map->dest.dev)
+		return false;
+
+	if (cfg->src_pipe_index != SPSRM_CLEAR &&
+	    cfg->src_pipe_index != map->src.pipe_index)
+		return false;
+
+	if (cfg->dest_pipe_index != SPSRM_CLEAR &&
+	    cfg->dest_pipe_index != map->dest.pipe_index)
+		return false;
+
+	if (cfg->config != map->config)
+		return false;
+
+	if (cfg->desc.size != SPSRM_CLEAR) {
+		if (cfg->desc.size != map->desc.size)
+			return false;
+
+		if (cfg->desc.phys_base != (SPSRM_CLEAR|SPSRM_ADDR_CLR) &&
+		    cfg->desc.base != (void *)(SPSRM_CLEAR|SPSRM_ADDR_CLR) &&
+		    (cfg->desc.phys_base != map->desc.phys_base ||
+		     cfg->desc.base != map->desc.base)) {
+			return false;
+		}
+	}
+
+	if (cfg->data.size != SPSRM_CLEAR) {
+		if (cfg->data.size != map->data.size)
+			return false;
+
+		if (cfg->data.phys_base != (SPSRM_CLEAR|SPSRM_ADDR_CLR) &&
+		    cfg->data.base != (void *)(SPSRM_CLEAR|SPSRM_ADDR_CLR) &&
+		    (cfg->data.phys_base != map->data.phys_base ||
+		     cfg->data.base != map->data.base))
+			return false;
+	}
+
+	return true;
+}
+
+/**
+ * Find unconnected mapping
+ *
+ * This function finds an allocated connection mapping that is not yet
+ * connected in the requested direction.
+ *
+ * @pipe - client context for SPS connection end point
+ *
+ * @return - pointer to allocated connection mapping, or NULL if not found
+ *
+ */
+static struct sps_connection *find_unconnected(struct sps_pipe *pipe)
+{
+	struct sps_connect *cfg = &pipe->connect;
+	struct sps_connection *map;
+
+	/* Has this connection already been allocated? */
+	list_for_each_entry(map, &sps_rm->connections_q, list) {
+		if (sps_rm_map_match(cfg, map))
+			if ((cfg->mode == SPS_MODE_SRC
+			     && map->client_src == NULL)
+			    || (cfg->mode != SPS_MODE_SRC
+				&& map->client_dest == NULL))
+				return map;	/* Found */
+	}
+
+	return NULL;		/* Not Found */
+}
+
+/**
+ * Assign connection to client
+ *
+ * This function assigns a connection to a client.
+ *
+ * @pipe - client context for SPS connection end point
+ *
+ * @map - connection mapping
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+static int sps_rm_assign(struct sps_pipe *pipe,
+			 struct sps_connection *map)
+{
+	struct sps_connect *cfg = &pipe->connect;
+
+	/* Check ownership and BAM */
+	if ((cfg->mode == SPS_MODE_SRC && map->client_src != NULL) ||
+	    (cfg->mode != SPS_MODE_SRC && map->client_dest != NULL)) {
+		SPS_ERR(sps,
+			"sps:%s:The end point is already connected.\n",
+			__func__);
+		return SPS_ERROR;
+	}
+
+	/* Check whether this end point is a BAM (not memory) */
+	if ((cfg->mode == SPS_MODE_SRC && map->src.bam == NULL) ||
+	    (cfg->mode != SPS_MODE_SRC && map->dest.bam == NULL)) {
+		SPS_ERR(sps, "sps:%s:The end point is empty.\n", __func__);
+		return SPS_ERROR;
+	}
+
+	/* Record the connection assignment */
+	if (cfg->mode == SPS_MODE_SRC) {
+		map->client_src = pipe;
+		pipe->bam = map->src.bam;
+		pipe->pipe_index = map->src.pipe_index;
+		if (pipe->connect.event_thresh != SPSRM_CLEAR)
+			map->src.event_threshold = pipe->connect.event_thresh;
+		if (pipe->connect.lock_group != SPSRM_CLEAR)
+			map->src.lock_group = pipe->connect.lock_group;
+	} else {
+		map->client_dest = pipe;
+		pipe->bam = map->dest.bam;
+		pipe->pipe_index = map->dest.pipe_index;
+		if (pipe->connect.event_thresh != SPSRM_CLEAR)
+			map->dest.event_threshold =
+			pipe->connect.event_thresh;
+		if (pipe->connect.lock_group != SPSRM_CLEAR)
+			map->dest.lock_group = pipe->connect.lock_group;
+	}
+	pipe->map = map;
+
+	SPS_DBG(pipe->bam, "sps:sps_rm_assign.bam %pa.pipe_index=%d\n",
+			BAM_ID(pipe->bam), pipe->pipe_index);
+
+	/* Copy parameters to client connect state */
+	pipe->connect.src_pipe_index = map->src.pipe_index;
+	pipe->connect.dest_pipe_index = map->dest.pipe_index;
+	pipe->connect.desc = map->desc;
+	pipe->connect.data = map->data;
+
+	pipe->client_state = SPS_STATE_ALLOCATE;
+
+	return 0;
+}
+
+/**
+ * Free connection mapping resources
+ *
+ * This function frees a connection mapping's resources.
+ *
+ * @map - pointer to connection mapping struct
+ *
+ */
+static void sps_rm_free_map_rsrc(struct sps_connection *map)
+{
+	struct sps_bam *bam;
+
+	if (map->client_src != NULL || map->client_dest != NULL)
+		return;
+
+	if (map->alloc_src_pipe != SPS_BAM_PIPE_INVALID) {
+		bam = map->src.bam;
+		sps_bam_pipe_free(bam, map->src.pipe_index);
+
+		/* Is this a BAM-DMA pipe? */
+#ifdef CONFIG_SPS_SUPPORT_BAMDMA
+		if ((bam->props.options & SPS_BAM_OPT_BAMDMA))
+			/* Deallocate and free the BAM-DMA channel */
+			sps_dma_pipe_free(bam, map->src.pipe_index);
+#endif
+		map->alloc_src_pipe = SPS_BAM_PIPE_INVALID;
+		map->src.pipe_index = SPS_BAM_PIPE_INVALID;
+	}
+	if (map->alloc_dest_pipe != SPS_BAM_PIPE_INVALID) {
+		bam = map->dest.bam;
+		sps_bam_pipe_free(bam, map->dest.pipe_index);
+
+		/* Is this a BAM-DMA pipe? */
+#ifdef CONFIG_SPS_SUPPORT_BAMDMA
+		if ((bam->props.options & SPS_BAM_OPT_BAMDMA)) {
+			/* Deallocate the BAM-DMA channel */
+			sps_dma_pipe_free(bam, map->dest.pipe_index);
+		}
+#endif
+		map->alloc_dest_pipe = SPS_BAM_PIPE_INVALID;
+		map->dest.pipe_index = SPS_BAM_PIPE_INVALID;
+	}
+	if (map->alloc_desc_base != SPS_ADDR_INVALID) {
+		sps_mem_free_io(map->alloc_desc_base, map->desc.size);
+
+		map->alloc_desc_base = SPS_ADDR_INVALID;
+		map->desc.phys_base = SPS_ADDR_INVALID;
+	}
+	if (map->alloc_data_base != SPS_ADDR_INVALID) {
+		sps_mem_free_io(map->alloc_data_base, map->data.size);
+
+		map->alloc_data_base = SPS_ADDR_INVALID;
+		map->data.phys_base = SPS_ADDR_INVALID;
+	}
+}
+
+/**
+ * Init connection mapping from client connect
+ *
+ * This function initializes a connection mapping from the client's
+ * connect parameters.
+ *
+ * @map - connection mapping struct
+ *
+ * @cfg - client connect parameters
+ *
+ */
+static void sps_rm_init_map(struct sps_connection *map,
+			    const struct sps_connect *cfg)
+{
+	/* Clear the connection mapping struct */
+	memset(map, 0, sizeof(*map));
+	map->desc.phys_base = SPS_ADDR_INVALID;
+	map->data.phys_base = SPS_ADDR_INVALID;
+	map->alloc_desc_base = SPS_ADDR_INVALID;
+	map->alloc_data_base = SPS_ADDR_INVALID;
+	map->alloc_src_pipe = SPS_BAM_PIPE_INVALID;
+	map->alloc_dest_pipe = SPS_BAM_PIPE_INVALID;
+
+	/* Copy client required parameters */
+	map->src.dev = cfg->source;
+	map->dest.dev = cfg->destination;
+	map->desc.size = cfg->desc.size;
+	map->data.size = cfg->data.size;
+	map->config = cfg->config;
+
+	/* Did client specify descriptor FIFO? */
+	if (map->desc.size != SPSRM_CLEAR &&
+	    cfg->desc.phys_base != (SPSRM_CLEAR|SPSRM_ADDR_CLR) &&
+	    cfg->desc.base != (void *)(SPSRM_CLEAR|SPSRM_ADDR_CLR))
+		map->desc = cfg->desc;
+
+	/* Did client specify data FIFO? */
+	if (map->data.size != SPSRM_CLEAR &&
+	    cfg->data.phys_base != (SPSRM_CLEAR|SPSRM_ADDR_CLR) &&
+	    cfg->data.base != (void *)(SPSRM_CLEAR|SPSRM_ADDR_CLR))
+		map->data = cfg->data;
+
+	/* Did client specify source pipe? */
+	if (cfg->src_pipe_index != SPSRM_CLEAR)
+		map->src.pipe_index = cfg->src_pipe_index;
+	else
+		map->src.pipe_index = SPS_BAM_PIPE_INVALID;
+
+	/* Did client specify destination pipe? */
+	if (cfg->dest_pipe_index != SPSRM_CLEAR)
+		map->dest.pipe_index = cfg->dest_pipe_index;
+	else
+		map->dest.pipe_index = SPS_BAM_PIPE_INVALID;
+}
+
+/**
+ * Create a new connection mapping
+ *
+ * This function creates a new connection mapping.
+ *
+ * @pipe - client context for SPS connection end point
+ *
+ * @return - pointer to allocated connection mapping, or NULL on error
+ *
+ */
+static struct sps_connection *sps_rm_create(struct sps_pipe *pipe)
+{
+	struct sps_connection *map;
+	struct sps_bam *bam;
+	u32 desc_size;
+	u32 data_size;
+	enum sps_mode dir;
+	int success = false;
+
+	/* Allocate new connection */
+	map = kzalloc(sizeof(*map), GFP_KERNEL);
+	if (map == NULL) {
+		SPS_ERR(sps,
+			"sps:%s:Failed to allocate connection struct",
+			__func__);
+		return NULL;
+	}
+
+	/* Initialize connection struct */
+	sps_rm_init_map(map, &pipe->connect);
+	dir = pipe->connect.mode;
+
+	/* On any failure below, jump to exit_err to release resources */
+	/* Get BAMs */
+	map->src.bam = sps_h2bam(map->src.dev);
+	if (map->src.bam == NULL) {
+		if (map->src.dev != SPS_DEV_HANDLE_MEM) {
+			SPS_ERR(sps, "sps:Invalid BAM handle: %pa",
+							&map->src.dev);
+			goto exit_err;
+		}
+		map->src.pipe_index = SPS_BAM_PIPE_INVALID;
+	}
+	map->dest.bam = sps_h2bam(map->dest.dev);
+	if (map->dest.bam == NULL) {
+		if (map->dest.dev != SPS_DEV_HANDLE_MEM) {
+			SPS_ERR(sps, "sps:Invalid BAM handle: %pa",
+							&map->dest.dev);
+			goto exit_err;
+		}
+		map->dest.pipe_index = SPS_BAM_PIPE_INVALID;
+	}
+
+	/* Check the BAM device for the pipe */
+	if ((dir == SPS_MODE_SRC && map->src.bam == NULL) ||
+	    (dir != SPS_MODE_SRC && map->dest.bam == NULL)) {
+		SPS_ERR(sps, "sps:Invalid BAM endpt: dir %d src %pa dest %pa",
+			dir, &map->src.dev, &map->dest.dev);
+		goto exit_err;
+	}
+
+	/* Allocate pipes and copy BAM parameters */
+	if (map->src.bam != NULL) {
+		/* Allocate the pipe */
+		bam = map->src.bam;
+		map->alloc_src_pipe = sps_bam_pipe_alloc(bam,
+							map->src.pipe_index);
+		if (map->alloc_src_pipe == SPS_BAM_PIPE_INVALID)
+			goto exit_err;
+		map->src.pipe_index = map->alloc_src_pipe;
+
+		/* Is this a BAM-DMA pipe? */
+#ifdef CONFIG_SPS_SUPPORT_BAMDMA
+		if ((bam->props.options & SPS_BAM_OPT_BAMDMA)) {
+			int rc;
+			/* Allocate the BAM-DMA channel */
+			rc = sps_dma_pipe_alloc(bam, map->src.pipe_index,
+						 SPS_MODE_SRC);
+			if (rc) {
+				SPS_ERR(bam,
+					"sps:Failed to alloc BAM-DMA pipe: %d",
+					map->src.pipe_index);
+				goto exit_err;
+			}
+		}
+#endif
+		map->src.bam_phys = bam->props.phys_addr;
+		map->src.event_threshold = bam->props.event_threshold;
+	}
+	if (map->dest.bam != NULL) {
+		/* Allocate the pipe */
+		bam = map->dest.bam;
+		map->alloc_dest_pipe = sps_bam_pipe_alloc(bam,
+							 map->dest.pipe_index);
+		if (map->alloc_dest_pipe == SPS_BAM_PIPE_INVALID)
+			goto exit_err;
+
+		map->dest.pipe_index = map->alloc_dest_pipe;
+
+		/* Is this a BAM-DMA pipe? */
+#ifdef CONFIG_SPS_SUPPORT_BAMDMA
+		if ((bam->props.options & SPS_BAM_OPT_BAMDMA)) {
+			int rc;
+			/* Allocate the BAM-DMA channel */
+			rc = sps_dma_pipe_alloc(bam, map->dest.pipe_index,
+					       SPS_MODE_DEST);
+			if (rc) {
+				SPS_ERR(bam,
+					"sps:Failed to alloc BAM-DMA pipe: %d",
+					map->dest.pipe_index);
+				goto exit_err;
+			}
+		}
+#endif
+		map->dest.bam_phys = bam->props.phys_addr;
+		map->dest.event_threshold =
+		bam->props.event_threshold;
+	}
+
+	/* Get default FIFO sizes */
+	desc_size = 0;
+	data_size = 0;
+	if (map->src.bam != NULL) {
+		bam = map->src.bam;
+		desc_size = bam->props.desc_size;
+		data_size = bam->props.data_size;
+	}
+	if (map->dest.bam != NULL) {
+		bam = map->dest.bam;
+		if (bam->props.desc_size > desc_size)
+			desc_size = bam->props.desc_size;
+		if (bam->props.data_size > data_size)
+			data_size = bam->props.data_size;
+	}
+
+	/* Set FIFO sizes */
+	if (map->desc.size == SPSRM_CLEAR)
+		map->desc.size = desc_size;
+	if (map->src.bam != NULL && map->dest.bam != NULL) {
+		/* BAM-to-BAM requires data FIFO */
+		if (map->data.size == SPSRM_CLEAR)
+			map->data.size = data_size;
+	} else {
+		map->data.size = 0;
+	}
+	if (map->desc.size > SPSRM_MAX_DESC_FIFO_SIZE) {
+		SPS_ERR(sps, "sps:Invalid desc FIFO size: 0x%x",
+						map->desc.size);
+		goto exit_err;
+	}
+	if (map->src.bam != NULL && map->dest.bam != NULL &&
+	    map->data.size > SPSRM_MAX_DATA_FIFO_SIZE) {
+		SPS_ERR(sps, "sps:Invalid data FIFO size: 0x%x",
+						map->data.size);
+		goto exit_err;
+	}
+
+	/* Allocate descriptor FIFO if necessary */
+	if (map->desc.size && map->desc.phys_base == SPS_ADDR_INVALID) {
+		map->alloc_desc_base = sps_mem_alloc_io(map->desc.size);
+		if (map->alloc_desc_base == SPS_ADDR_INVALID) {
+			SPS_ERR(sps, "sps:I/O memory allocation failure:0x%x",
+				map->desc.size);
+			goto exit_err;
+		}
+		map->desc.phys_base = map->alloc_desc_base;
+		map->desc.base = spsi_get_mem_ptr(map->desc.phys_base);
+		if (map->desc.base == NULL) {
+			SPS_ERR(sps,
+				"sps:Cannot get virt addr for I/O buffer:%pa",
+				&map->desc.phys_base);
+			goto exit_err;
+		}
+	}
+
+	/* Allocate data FIFO if necessary */
+	if (map->data.size && map->data.phys_base == SPS_ADDR_INVALID) {
+		map->alloc_data_base = sps_mem_alloc_io(map->data.size);
+		if (map->alloc_data_base == SPS_ADDR_INVALID) {
+			SPS_ERR(sps, "sps:I/O memory allocation failure:0x%x",
+				map->data.size);
+			goto exit_err;
+		}
+		map->data.phys_base = map->alloc_data_base;
+		map->data.base = spsi_get_mem_ptr(map->data.phys_base);
+		if (map->data.base == NULL) {
+			SPS_ERR(sps,
+				"sps:Cannot get virt addr for I/O buffer:%pa",
+				&map->data.phys_base);
+			goto exit_err;
+		}
+	}
+
+	/* Attempt to assign this connection to the client */
+	if (sps_rm_assign(pipe, map)) {
+		SPS_ERR(sps,
+		"sps:%s:failed to assign a connection to the client.\n",
+			__func__);
+		goto exit_err;
+	}
+
+	/* Initialization was successful */
+	success = true;
+exit_err:
+
+	/* If initialization failed, free resources */
+	if (!success) {
+		sps_rm_free_map_rsrc(map);
+		kfree(map);
+		return NULL;
+	}
+
+	return map;
+}
+
+/**
+ * Free connection mapping
+ *
+ * This function frees a connection mapping.
+ *
+ * @pipe - client context for SPS connection end point
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+static int sps_rm_free(struct sps_pipe *pipe)
+{
+	struct sps_connection *map = (void *)pipe->map;
+	struct sps_connect *cfg = &pipe->connect;
+
+	mutex_lock(&sps_rm->lock);
+
+	/* Free this connection */
+	if (cfg->mode == SPS_MODE_SRC)
+		map->client_src = NULL;
+	else
+		map->client_dest = NULL;
+
+	pipe->map = NULL;
+	pipe->client_state = SPS_STATE_DISCONNECT;
+	sps_rm_free_map_rsrc(map);
+
+	sps_rm_remove_ref(map);
+
+	mutex_unlock(&sps_rm->lock);
+
+	return 0;
+}
+
+/**
+ * Allocate an SPS connection end point
+ *
+ * This function allocates resources and initializes a BAM connection.
+ *
+ * @pipe - client context for SPS connection end point
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+static int sps_rm_alloc(struct sps_pipe *pipe)
+{
+	struct sps_connection *map;
+	int result = SPS_ERROR;
+
+	if (pipe->connect.sps_reserved != SPSRM_CLEAR) {
+		/*
+		 * Client did not call sps_get_config() to init
+		 * struct sps_connect, so only use legacy members.
+		 */
+		unsigned long source = pipe->connect.source;
+		unsigned long destination = pipe->connect.destination;
+		enum sps_mode mode = pipe->connect.mode;
+		u32 config = pipe->connect.config;
+
+		memset(&pipe->connect, SPSRM_CLEAR,
+			      sizeof(pipe->connect));
+		pipe->connect.source = source;
+		pipe->connect.destination = destination;
+		pipe->connect.mode = mode;
+		pipe->connect.config = config;
+	}
+	if (pipe->connect.config == SPSRM_CLEAR)
+		pipe->connect.config = SPS_CONFIG_DEFAULT;
+
+	/*
+	 * If the configuration is not the default, the client is specifying
+	 * a connection mapping.  Find a matching mapping, or fail.  If a
+	 * match is found, the client's sps_connect struct is updated with
+	 * all of the mapping's values.
+	 */
+	if (pipe->connect.config != SPS_CONFIG_DEFAULT) {
+		if (sps_map_find(&pipe->connect)) {
+			SPS_ERR(sps,
+				"sps:%s:Failed to find connection mapping",
+								__func__);
+			return SPS_ERROR;
+		}
+	}
+
+	mutex_lock(&sps_rm->lock);
+	/* Check client state */
+	if (IS_SPS_STATE_OK(pipe)) {
+		SPS_ERR(sps,
+			"sps:%s:Client connection already allocated",
+							__func__);
+		goto exit_err;
+	}
+
+	/* Are the connection resources already allocated? */
+	map = find_unconnected(pipe);
+	if (map != NULL) {
+		/* Attempt to assign this connection to the client */
+		if (sps_rm_assign(pipe, map))
+			/* Assignment failed, so must allocate new */
+			map = NULL;
+	}
+
+	/* Allocate a new connection if necessary */
+	if (map == NULL) {
+		map = sps_rm_create(pipe);
+		if (map == NULL) {
+			SPS_ERR(sps,
+				"sps:%s:Failed to allocate connection",
+							__func__);
+			goto exit_err;
+		}
+		list_add_tail(&map->list, &sps_rm->connections_q);
+	}
+
+	/* Add the connection to the allocated queue */
+	map->refs++;
+
+	/* Initialization was successful */
+	result = 0;
+exit_err:
+	mutex_unlock(&sps_rm->lock);
+
+	if (result)
+		return SPS_ERROR;
+
+	return 0;
+}
+
+/**
+ * Disconnect an SPS connection end point
+ *
+ * This function frees resources and de-initializes a BAM connection.
+ *
+ * @pipe - client context for SPS connection end point
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+static int sps_rm_disconnect(struct sps_pipe *pipe)
+{
+	sps_rm_free(pipe);
+	return 0;
+}
+
+/**
+ * Process connection state change
+ *
+ * This function processes a connection state change.
+ *
+ * @pipe - pointer to client context
+ *
+ * @state - new state for connection
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_rm_state_change(struct sps_pipe *pipe, u32 state)
+{
+	int auto_enable = false;
+	int result;
+
+	/* Allocate the pipe */
+	if (pipe->client_state == SPS_STATE_DISCONNECT &&
+	    state == SPS_STATE_ALLOCATE) {
+		if (sps_rm_alloc(pipe)) {
+			SPS_ERR(pipe->bam,
+				"sps:Fail to allocate resource for BAM 0x%p pipe %d.\n",
+					pipe->bam, pipe->pipe_index);
+			return SPS_ERROR;
+		}
+	}
+
+	/* Configure the pipe */
+	if (pipe->client_state == SPS_STATE_ALLOCATE &&
+	    state == SPS_STATE_CONNECT) {
+		/* Connect the BAM pipe */
+		struct sps_bam_connect_param params;
+
+		memset(&params, 0, sizeof(params));
+		params.mode = pipe->connect.mode;
+		if (pipe->connect.options != SPSRM_CLEAR) {
+			params.options = pipe->connect.options;
+			params.irq_gen_addr = pipe->connect.irq_gen_addr;
+			params.irq_gen_data = pipe->connect.irq_gen_data;
+		}
+		result = sps_bam_pipe_connect(pipe, &params);
+		if (result) {
+			SPS_ERR(pipe->bam,
+				"sps:Failed to connect BAM 0x%p pipe %d",
+					pipe->bam, pipe->pipe_index);
+			return SPS_ERROR;
+		}
+		pipe->client_state = SPS_STATE_CONNECT;
+
+		/* Set auto-enable for system-mode connections */
+		if (pipe->connect.source == SPS_DEV_HANDLE_MEM ||
+		    pipe->connect.destination == SPS_DEV_HANDLE_MEM) {
+			if (pipe->map->desc.size != 0 &&
+			    pipe->map->desc.phys_base != SPS_ADDR_INVALID)
+				auto_enable = true;
+		}
+	}
+
+	/* Enable the pipe data flow */
+	if (pipe->client_state == SPS_STATE_CONNECT &&
+	    !(state == SPS_STATE_DISABLE
+	      || state == SPS_STATE_DISCONNECT)
+	    && (state == SPS_STATE_ENABLE || auto_enable
+		|| (pipe->connect.options & SPS_O_AUTO_ENABLE))) {
+		result = sps_bam_pipe_enable(pipe->bam, pipe->pipe_index);
+		if (result) {
+			SPS_ERR(pipe->bam,
+				"sps:Failed to set BAM %pa pipe %d flow on",
+				&pipe->bam->props.phys_addr,
+				pipe->pipe_index);
+			return SPS_ERROR;
+		}
+
+		/* Is this a BAM-DMA pipe? */
+#ifdef CONFIG_SPS_SUPPORT_BAMDMA
+		if ((pipe->bam->props.options & SPS_BAM_OPT_BAMDMA)) {
+			/* Activate the BAM-DMA channel */
+			result = sps_dma_pipe_enable(pipe->bam,
+						     pipe->pipe_index);
+			if (result) {
+				SPS_ERR(pipe->bam,
+					"sps:Failed to activate BAM-DMA pipe: %d",
+					pipe->pipe_index);
+				return SPS_ERROR;
+			}
+		}
+#endif
+		pipe->client_state = SPS_STATE_ENABLE;
+	}
+
+	/* Disable the pipe data flow */
+	if (pipe->client_state == SPS_STATE_ENABLE &&
+	    (state == SPS_STATE_DISABLE	|| state == SPS_STATE_DISCONNECT)) {
+		result = sps_bam_pipe_disable(pipe->bam, pipe->pipe_index);
+		if (result) {
+			SPS_ERR(pipe->bam,
+				"sps:Failed to set BAM %pa pipe %d flow off",
+				&pipe->bam->props.phys_addr,
+				pipe->pipe_index);
+			return SPS_ERROR;
+		}
+		pipe->client_state = SPS_STATE_CONNECT;
+	}
+
+	/* Disconnect the BAM pipe */
+	if (pipe->client_state == SPS_STATE_CONNECT &&
+	    state == SPS_STATE_DISCONNECT) {
+		struct sps_connection *map;
+		struct sps_bam *bam = pipe->bam;
+		unsigned long flags;
+		u32 pipe_index;
+
+		if (pipe->connect.mode == SPS_MODE_SRC)
+			pipe_index = pipe->map->src.pipe_index;
+		else
+			pipe_index = pipe->map->dest.pipe_index;
+
+		if (bam->props.irq > 0)
+			synchronize_irq(bam->props.irq);
+
+		spin_lock_irqsave(&bam->isr_lock, flags);
+		pipe->disconnecting = true;
+		spin_unlock_irqrestore(&bam->isr_lock, flags);
+		result = sps_bam_pipe_disconnect(pipe->bam, pipe_index);
+		if (result) {
+			SPS_ERR(pipe->bam,
+				"sps:Failed to disconnect BAM %pa pipe %d",
+				&pipe->bam->props.phys_addr,
+				pipe->pipe_index);
+			return SPS_ERROR;
+		}
+
+		/* Clear map state */
+		map = (void *)pipe->map;
+		if (pipe->connect.mode == SPS_MODE_SRC)
+			map->client_src = NULL;
+		else if (pipe->connect.mode == SPS_MODE_DEST)
+			map->client_dest = NULL;
+
+		sps_rm_disconnect(pipe);
+
+		/* Clear the client state */
+		pipe->map = NULL;
+		pipe->bam = NULL;
+		pipe->client_state = SPS_STATE_DISCONNECT;
+	}
+
+	return 0;
+}
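+
+/*
+ * Client-visible flow (illustrative sketch): the transitions above are
+ * normally driven through the public SPS API rather than by calling
+ * sps_rm_state_change() directly.  The API names below come from the
+ * public msm-sps interface and are an assumption of this sketch; error
+ * handling is omitted.
+ *
+ *	struct sps_pipe *pipe = sps_alloc_endpoint();
+ *	struct sps_connect connect;
+ *
+ *	sps_get_config(pipe, &connect);    (DISCONNECT, defaults)
+ *	connect.source      = SPS_DEV_HANDLE_MEM;
+ *	connect.destination = bam_handle;
+ *	connect.mode        = SPS_MODE_DEST;
+ *	sps_connect(pipe, &connect);       (ALLOCATE, CONNECT, ENABLE)
+ *	...
+ *	sps_disconnect(pipe);              (back to DISCONNECT)
+ *	sps_free_endpoint(pipe);
+ */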
diff --git a/drivers/platform/msm/sps/spsi.h b/drivers/platform/msm/sps/spsi.h
new file mode 100644
index 0000000..2e57f7d
--- /dev/null
+++ b/drivers/platform/msm/sps/spsi.h
@@ -0,0 +1,547 @@
+/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/**
+ * Smart-Peripheral-Switch (SPS) internal API.
+ */
+
+#ifndef _SPSI_H_
+#define _SPSI_H_
+
+#include <linux/types.h>	/* u32 */
+#include <linux/list.h>		/* list_head */
+#include <linux/kernel.h>	/* pr_info() */
+#include <linux/compiler.h>
+#include <linux/ratelimit.h>
+#include <linux/ipc_logging.h>
+
+#include <linux/msm-sps.h>
+
+#include "sps_map.h"
+
+#if defined(CONFIG_PHYS_ADDR_T_64BIT) || defined(CONFIG_ARM_LPAE)
+#define SPS_LPAE (true)
+#else
+#define SPS_LPAE (false)
+#endif
+
+#define BAM_MAX_PIPES              31
+#define BAM_MAX_P_LOCK_GROUP_NUM   31
+
+/* Adjust for offset of struct sps_q_event */
+#define SPS_EVENT_INDEX(e)    ((e) - 1)
+#define SPS_ERROR -1
+
+/* BAM identifier used in log messages */
+#define BAM_ID(dev)       (&(dev)->props.phys_addr)
+
+/* "Clear" value for the connection parameter struct */
+#define SPSRM_CLEAR     0xccccccccUL
+#define SPSRM_ADDR_CLR \
+	((sizeof(int) == sizeof(long)) ? 0 : (SPSRM_CLEAR << 32))
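+/*
+ * Note: sps_rm_config_init() memsets the connect struct with 0xcc, so a
+ * 64-bit pointer or phys_addr_t member reads back as the full pattern
+ * (SPSRM_CLEAR | SPSRM_ADDR_CLR).  SPSRM_ADDR_CLR supplies the upper 32
+ * bits on LP64 builds and is 0 when int and long are the same width.
+ */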
+
+#define MAX_MSG_LEN 80
+#define SPS_IPC_LOGPAGES 10
+#define SPS_IPC_REG_DUMP_FACTOR 3
+#define SPS_IPC_DEFAULT_LOGLEVEL 3
+#define SPS_IPC_MAX_LOGLEVEL 4
+
+/* Connection mapping control struct */
+struct sps_rm {
+	struct list_head connections_q;
+	struct mutex lock;
+};
+
+/* SPS driver state struct */
+struct sps_drv {
+	struct class *dev_class;
+	dev_t dev_num;
+	struct device *dev;
+	struct clk *pmem_clk;
+	struct clk *bamdma_clk;
+	struct clk *dfab_clk;
+
+	int is_ready;
+
+	/* Platform data */
+	phys_addr_t pipemem_phys_base;
+	u32 pipemem_size;
+	phys_addr_t bamdma_bam_phys_base;
+	u32 bamdma_bam_size;
+	phys_addr_t bamdma_dma_phys_base;
+	u32 bamdma_dma_size;
+	u32 bamdma_irq;
+	u32 bamdma_restricted_pipes;
+
+	/* Driver options bitflags (see SPS_OPT_*) */
+	u32 options;
+
+	/* Mutex to protect BAM and connection queues */
+	struct mutex lock;
+
+	/* BAM devices */
+	struct list_head bams_q;
+
+	char *hal_bam_version;
+
+	/* Connection control state */
+	struct sps_rm connection_ctrl;
+
+	void *ipc_log0;
+	void *ipc_log1;
+	void *ipc_log2;
+	void *ipc_log3;
+	void *ipc_log4;
+
+	u32 ipc_loglevel;
+};
+
+extern struct sps_drv *sps;
+extern u32 d_type;
+extern bool enhd_pipe;
+extern bool imem;
+extern enum sps_bam_type bam_type;
+
+#ifdef CONFIG_DEBUG_FS
+extern u8 debugfs_record_enabled;
+extern u8 logging_option;
+extern u8 debug_level_option;
+extern u8 print_limit_option;
+
+#define SPS_IPC(idx, dev, msg, args...) do { \
+		if (dev) { \
+			if ((idx == 0) && (dev)->ipc_log0) \
+				ipc_log_string((dev)->ipc_log0, \
+					"%s: " msg, __func__, args); \
+			else if ((idx == 1) && (dev)->ipc_log1) \
+				ipc_log_string((dev)->ipc_log1, \
+					"%s: " msg, __func__, args); \
+			else if ((idx == 2) && (dev)->ipc_log2) \
+				ipc_log_string((dev)->ipc_log2, \
+					"%s: " msg, __func__, args); \
+			else if ((idx == 3) && (dev)->ipc_log3) \
+				ipc_log_string((dev)->ipc_log3, \
+					"%s: " msg, __func__, args); \
+			else if ((idx == 4) && (dev)->ipc_log4) \
+				ipc_log_string((dev)->ipc_log4, \
+					"%s: " msg, __func__, args); \
+			else \
+				pr_debug("sps: no such IPC logging index!\n"); \
+		} \
+	} while (0)
+#define SPS_DUMP(msg, args...) do {					\
+		SPS_IPC(4, sps, msg, args); \
+		if (sps) { \
+			if (sps->ipc_log4 == NULL) \
+				pr_info(msg, ##args);	\
+		} \
+	} while (0)
+#define SPS_DEBUGFS(msg, args...) do {					\
+		char buf[MAX_MSG_LEN];		\
+		snprintf(buf, MAX_MSG_LEN, msg"\n", ##args);	\
+		sps_debugfs_record(buf);	\
+	} while (0)
+#define SPS_ERR(dev, msg, args...) do {					\
+		if (logging_option != 1) {	\
+			if (unlikely(print_limit_option > 2))	\
+				pr_err_ratelimited(msg, ##args);	\
+			else	\
+				pr_err(msg, ##args);	\
+		}	\
+		if (unlikely(debugfs_record_enabled))	\
+			SPS_DEBUGFS(msg, ##args);	\
+		SPS_IPC(3, dev, msg, args); \
+	} while (0)
+#define SPS_INFO(dev, msg, args...) do {				\
+		if (logging_option != 1) {	\
+			if (unlikely(print_limit_option > 1))	\
+				pr_info_ratelimited(msg, ##args);	\
+			else	\
+				pr_info(msg, ##args);	\
+		}	\
+		if (unlikely(debugfs_record_enabled))	\
+			SPS_DEBUGFS(msg, ##args);	\
+		SPS_IPC(3, dev, msg, args); \
+	} while (0)
+#define SPS_DBG(dev, msg, args...) do {					\
+		if ((unlikely(logging_option > 1))	\
+			&& (unlikely(debug_level_option > 3))) {\
+			if (unlikely(print_limit_option > 0))	\
+				pr_info_ratelimited(msg, ##args);	\
+			else	\
+				pr_info(msg, ##args);	\
+		} else	\
+			pr_debug(msg, ##args);	\
+		if (unlikely(debugfs_record_enabled))	\
+			SPS_DEBUGFS(msg, ##args);	\
+		if (dev) { \
+			if ((dev)->ipc_loglevel <= 0)	\
+				SPS_IPC(0, dev, msg, args); \
+		}	\
+	} while (0)
+#define SPS_DBG1(dev, msg, args...) do {				\
+		if ((unlikely(logging_option > 1))	\
+			&& (unlikely(debug_level_option > 2))) {\
+			if (unlikely(print_limit_option > 0))	\
+				pr_info_ratelimited(msg, ##args);	\
+			else	\
+				pr_info(msg, ##args);	\
+		} else	\
+			pr_debug(msg, ##args);	\
+		if (unlikely(debugfs_record_enabled))	\
+			SPS_DEBUGFS(msg, ##args);	\
+		if (dev) { \
+			if ((dev)->ipc_loglevel <= 1)	\
+				SPS_IPC(1, dev, msg, args);	\
+		}	\
+	} while (0)
+#define SPS_DBG2(dev, msg, args...) do {				\
+		if ((unlikely(logging_option > 1))	\
+			&& (unlikely(debug_level_option > 1))) {\
+			if (unlikely(print_limit_option > 0))	\
+				pr_info_ratelimited(msg, ##args);	\
+			else	\
+				pr_info(msg, ##args);	\
+		} else	\
+			pr_debug(msg, ##args);	\
+		if (unlikely(debugfs_record_enabled))	\
+			SPS_DEBUGFS(msg, ##args);	\
+		if (dev) { \
+			if ((dev)->ipc_loglevel <= 2)	\
+				SPS_IPC(2, dev, msg, args); \
+		}	\
+	} while (0)
+#define SPS_DBG3(dev, msg, args...) do {				\
+		if ((unlikely(logging_option > 1))	\
+			&& (unlikely(debug_level_option > 0))) {\
+			if (unlikely(print_limit_option > 0))	\
+				pr_info_ratelimited(msg, ##args);	\
+			else	\
+				pr_info(msg, ##args);	\
+		} else	\
+			pr_debug(msg, ##args);	\
+		if (unlikely(debugfs_record_enabled))	\
+			SPS_DEBUGFS(msg, ##args);	\
+		if (dev) { \
+			if ((dev)->ipc_loglevel <= 3)	\
+				SPS_IPC(3, dev, msg, args); \
+		}	\
+	} while (0)
+#else
+#define	SPS_DBG3(x...)		pr_debug(x)
+#define	SPS_DBG2(x...)		pr_debug(x)
+#define	SPS_DBG1(x...)		pr_debug(x)
+#define	SPS_DBG(x...)		pr_debug(x)
+#define	SPS_INFO(x...)		pr_info(x)
+#define	SPS_ERR(x...)		pr_err(x)
+#define	SPS_DUMP(x...)		pr_info(x)
+#endif
+
+/* End point parameters */
+struct sps_conn_end_pt {
+	unsigned long dev;		/* Device handle of BAM */
+	phys_addr_t bam_phys;		/* Physical address of BAM. */
+	u32 pipe_index;		/* Pipe index */
+	u32 event_threshold;	/* Pipe event threshold */
+	u32 lock_group;	/* The lock group this pipe belongs to */
+	void *bam;
+};
+
+/* Connection bookkeeping descriptor struct */
+struct sps_connection {
+	struct list_head list;
+
+	/* Source end point parameters */
+	struct sps_conn_end_pt src;
+
+	/* Destination end point parameters */
+	struct sps_conn_end_pt dest;
+
+	/* Resource parameters */
+	struct sps_mem_buffer desc;	/* Descriptor FIFO */
+	struct sps_mem_buffer data;	/* Data FIFO (BAM-to-BAM mode only) */
+	u32 config;		/* Client specified connection configuration */
+
+	/* Connection state */
+	void *client_src;
+	void *client_dest;
+	int refs;		/* Reference counter */
+
+	/* Dynamically allocated resources, if required */
+	u32 alloc_src_pipe;	/* Source pipe index */
+	u32 alloc_dest_pipe;	/* Destination pipe index */
+	/* Physical address of descriptor FIFO */
+	phys_addr_t alloc_desc_base;
+	phys_addr_t alloc_data_base;	/* Physical address of data FIFO */
+};
+
+/* Event bookkeeping descriptor struct */
+struct sps_q_event {
+	struct list_head list;
+	/* Event payload data */
+	struct sps_event_notify notify;
+};
+
+/* Memory heap statistics */
+struct sps_mem_stats {
+	u32 base_addr;
+	u32 size;
+	u32 blocks_used;
+	u32 bytes_used;
+	u32 max_bytes_used;
+};
+
+enum sps_bam_type {
+	SPS_BAM_LEGACY,
+	SPS_BAM_NDP,
+	SPS_BAM_NDP_4K
+};
+
+#ifdef CONFIG_DEBUG_FS
+/* record debug info for debugfs */
+void sps_debugfs_record(const char *msg);
+#endif
+
+/* output the content of BAM-level registers */
+void print_bam_reg(void *virt_addr);
+
+/* output the content of BAM pipe registers */
+void print_bam_pipe_reg(void *virt_addr, u32 pipe_index);
+
+/* output the content of selected BAM-level registers */
+void print_bam_selected_reg(void *virt_addr, u32 pipe_index);
+
+/* output the content of selected BAM pipe registers */
+void print_bam_pipe_selected_reg(void *virt_addr, u32 pipe_index);
+
+/* output descriptor FIFO of a pipe */
+void print_bam_pipe_desc_fifo(void *virt_addr, u32 pipe_index, u32 option);
+
+/* output BAM_TEST_BUS_REG */
+void print_bam_test_bus_reg(void *base, u32 tb_sel);
+
+/* halt and un-halt a pipe */
+void bam_pipe_halt(void *base, u32 pipe, bool halt);
+
+/**
+ * Translate physical to virtual address
+ *
+ * This Function translates physical to virtual address.
+ *
+ * @phys_addr - physical address to translate
+ *
+ * @return virtual memory pointer
+ *
+ */
+void *spsi_get_mem_ptr(phys_addr_t phys_addr);
+
+/**
+ * Allocate I/O (pipe) memory
+ *
+ * This function allocates target I/O (pipe) memory.
+ *
+ * @bytes - number of bytes to allocate
+ *
+ * @return physical address of allocated memory, or SPS_ADDR_INVALID on error
+ */
+phys_addr_t sps_mem_alloc_io(u32 bytes);
+
+/**
+ * Free I/O (pipe) memory
+ *
+ * This function frees target I/O (pipe) memory.
+ *
+ * @phys_addr - physical address of memory to free
+ *
+ * @bytes - number of bytes to free.
+ */
+void sps_mem_free_io(phys_addr_t phys_addr, u32 bytes);
+
+/**
+ * Find matching connection mapping
+ *
+ * This function searches for a connection mapping that matches the
+ * parameters supplied by the client.  If a match is found, the client's
+ * parameter struct is updated with the values specified in the mapping.
+ *
+ * @connect - pointer to client connection parameters
+ *
+ * @return 0 if match is found, negative value otherwise
+ *
+ */
+int sps_map_find(struct sps_connect *connect);
+
+/**
+ * Allocate a BAM DMA pipe
+ *
+ * This function allocates a BAM DMA pipe, and is intended to be called
+ * internally from the BAM resource manager.  Allocation implies that
+ * the pipe has been referenced by a client Connect() and is in use.
+ *
+ * BAM DMA is permissive with allocations: a pipe may be allocated with
+ * or without a prior client-initiated channel allocation.  This lets the
+ * client specify exactly which pipe to use directly through the
+ * Connect() API, whereas sps_dma_alloc_chan() does not let the client
+ * choose the pipes/channel.
+ *
+ * @bam - pointer to BAM device descriptor
+ *
+ * @pipe_index - pipe index
+ *
+ * @dir - pipe direction
+ *
+ * @return 0 on success, negative value on error
+ */
+int sps_dma_pipe_alloc(void *bam, u32 pipe_index, enum sps_mode dir);
+
+/**
+ * Enable a BAM DMA pipe
+ *
+ * This function enables the channel associated with a BAM DMA pipe, and
+ * is intended to be called internally from the BAM resource manager.
+ * Enable must occur *after* the pipe has been enabled so that proper
+ * sequencing between pipe and DMA channel enables can be enforced.
+ *
+ * @bam - pointer to BAM device descriptor
+ *
+ * @pipe_index - pipe index
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_dma_pipe_enable(void *bam, u32 pipe_index);
+
+/**
+ * Free a BAM DMA pipe
+ *
+ * This function disables and frees a BAM DMA pipe, and is intended to be
+ * called internally from the BAM resource manager.  This must occur *after*
+ * the pipe has been disabled/reset so that proper sequencing between pipe and
+ * DMA channel resets can be enforced.
+ *
+ * @bam - pointer to BAM device descriptor
+ *
+ * @pipe_index - pipe index
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_dma_pipe_free(void *bam, u32 pipe_index);
+
+/**
+ * Initialize driver memory module
+ *
+ * This function initializes the driver memory module.
+ *
+ * @pipemem_phys_base - Pipe-Memory physical base.
+ *
+ * @pipemem_size - Pipe-Memory size.
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_mem_init(phys_addr_t pipemem_phys_base, u32 pipemem_size);
+
+/**
+ * De-initialize driver memory module
+ *
+ * This function de-initializes the driver memory module.
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_mem_de_init(void);
+
+/**
+ * Initialize BAM DMA module
+ *
+ * This function initializes the BAM DMA module.
+ *
+ * @bam_props - pointer to the BAM DMA device's BSP configuration properties
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_dma_init(const struct sps_bam_props *bam_props);
+
+/**
+ * De-initialize BAM DMA module
+ *
+ * This function de-initializes the SPS BAM DMA module.
+ *
+ */
+void sps_dma_de_init(void);
+
+/**
+ * Initialize BAM DMA device
+ *
+ * This function initializes a BAM DMA device.
+ *
+ * @h - BAM handle
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_dma_device_init(unsigned long h);
+
+/**
+ * De-initialize BAM DMA device
+ *
+ * This function de-initializes a BAM DMA device.
+ *
+ * @h - BAM handle
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_dma_device_de_init(unsigned long h);
+
+/**
+ * Initialize connection mapping module
+ *
+ * This function initializes the SPS connection mapping module.
+ *
+ * @map_props - pointer to connection mapping BSP configuration properties
+ *
+ * @options - driver options bitflags (see SPS_OPT_*)
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_map_init(const struct sps_map *map_props, u32 options);
+
+/**
+ * De-initialize connection mapping module
+ *
+ * This function de-initializes the SPS connection mapping module.
+ *
+ */
+void sps_map_de_init(void);
+
+/*
+ * bam_pipe_reset - reset a BAM pipe.
+ * @base:	BAM virtual address
+ * @pipe:	pipe index
+ *
+ * This function resets a BAM pipe.
+ */
+void bam_pipe_reset(void *base, u32 pipe);
+
+/*
+ * bam_disable_pipe - disable a BAM pipe.
+ * @base:	BAM virtual address
+ * @pipe:	pipe index
+ *
+ * This function disables a BAM pipe.
+ */
+void bam_disable_pipe(void *base, u32 pipe);
+#endif	/* _SPSI_H_ */
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index fa119a6..c99a5d6 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -476,18 +476,18 @@
 	  Command DB queries shared memory by key string for shared system
 	  resources
 
-config MSM_QDSP6_APRV2
-        bool "Audio QDSP6 APRv2 support"
-        depends on MSM_SMD
+config MSM_QDSP6_APRV2_GLINK
+        bool "Audio QDSP6 APRv2 Over Glink support"
+	depends on MSM_GLINK
         help
           Enable APRv2 IPC protocol support between
           application processor and QDSP6. APR is
           used by audio driver to configure QDSP6's
           ASM, ADM and AFE.
 
-config MSM_QDSP6_APRV3
-	bool "Audio QDSP6 APRv3 support"
-	depends on MSM_SMD
+config MSM_QDSP6_APRV3_GLINK
+	bool "Audio QDSP6 APRv3 Over Glink support"
+	depends on MSM_GLINK
 	help
 	  Enable APRv3 IPC protocol support between
 	  application processor and QDSP6. APR is
@@ -497,7 +497,7 @@
 config MSM_ADSP_LOADER
 	tristate "ADSP loader support"
 	select SND_SOC_MSM_APRV2_INTF
-	depends on MSM_QDSP6_APRV2 || MSM_QDSP6_APRV3
+	depends on MSM_QDSP6_APRV2_GLINK || MSM_QDSP6_APRV3_GLINK
 	help
 	  Enable ADSP image loader.
 	  The ADSP loader brings ADSP out of reset
@@ -506,7 +506,7 @@
 
 config MSM_AVTIMER
 	tristate "Avtimer Driver"
-	depends on MSM_QDSP6_APRV2 || MSM_QDSP6_APRV3
+	depends on MSM_QDSP6_APRV2_GLINK || MSM_QDSP6_APRV3_GLINK
 	help
 		This driver gets the Q6 out of power collapsed state and
 		exposes ioctl control to read avtimer tick.
diff --git a/drivers/soc/qcom/avtimer.c b/drivers/soc/qcom/avtimer.c
index 5482fbf..757cdb0 100644
--- a/drivers/soc/qcom/avtimer.c
+++ b/drivers/soc/qcom/avtimer.c
@@ -517,7 +517,7 @@
 	s32 rc;
 
 	rc = platform_driver_register(&dev_avtimer_driver);
-	if (IS_ERR_VALUE(rc)) {
+	if (rc < 0) {
 		pr_err("%s: platform_driver_register failed\n", __func__);
 		goto error_platform_driver;
 	}
diff --git a/drivers/soc/qcom/qdsp6v2/apr.c b/drivers/soc/qcom/qdsp6v2/apr.c
index 5f3c9d4..5d860a3 100644
--- a/drivers/soc/qcom/qdsp6v2/apr.c
+++ b/drivers/soc/qcom/qdsp6v2/apr.c
@@ -29,7 +29,6 @@
 #include <soc/qcom/subsystem_restart.h>
 #include <soc/qcom/scm.h>
 #include <sound/apr_audio-v2.h>
-#include <soc/qcom/smd.h>
 #include <linux/qdsp6v2/apr.h>
 #include <linux/qdsp6v2/apr_tal.h>
 #include <linux/qdsp6v2/dsp_debug.h>
diff --git a/drivers/soc/qcom/qdsp6v2/apr_tal_glink.c b/drivers/soc/qcom/qdsp6v2/apr_tal_glink.c
index b32ed9e..40a539e 100644
--- a/drivers/soc/qcom/qdsp6v2/apr_tal_glink.c
+++ b/drivers/soc/qcom/qdsp6v2/apr_tal_glink.c
@@ -27,7 +27,6 @@
 #include <linux/platform_device.h>
 #include <linux/delay.h>
 #include <linux/clk.h>
-#include <soc/qcom/smd.h>
 #include <soc/qcom/glink.h>
 #include <linux/qdsp6v2/apr_tal.h>
 
diff --git a/drivers/soc/qcom/qdsp6v2/audio_notifier.c b/drivers/soc/qcom/qdsp6v2/audio_notifier.c
index 9119e8b..a2b0f0e 100644
--- a/drivers/soc/qcom/qdsp6v2/audio_notifier.c
+++ b/drivers/soc/qcom/qdsp6v2/audio_notifier.c
@@ -224,7 +224,7 @@
 		ret = -EINVAL;
 		goto done;
 	}
-	if (IS_ERR_VALUE(ret)) {
+	if (ret < 0) {
 		pr_err("%s: deregister failed for service %s, ret %d\n",
 			__func__, service_data[service][domain].name, ret);
 		goto done;
@@ -260,7 +260,7 @@
 		goto done;
 	}
 
-	if (IS_ERR_VALUE(ret)) {
+	if (ret < 0) {
 		pr_err("%s: service registration failed on service %s for client %s\n",
 			__func__, service_data[service][domain].name,
 			client_data->client_name);
@@ -329,7 +329,7 @@
 				service_data[service][domain].name);
 
 		ret = audio_notifer_reg_client_service(client_data, service);
-		if (IS_ERR_VALUE(ret))
+		if (ret < 0)
 			pr_err("%s: client %s failed to register on service %s",
 				__func__, client_data->client_name,
 				service_data[service][domain].name);
@@ -361,7 +361,7 @@
 		goto done;
 	}
 
-	if (IS_ERR_VALUE(ret)) {
+	if (ret < 0) {
 		pr_err("%s: deregister failed for client %s on service %s, ret %d\n",
 			__func__, client_data->client_name,
 			service_data[service][domain].name, ret);
@@ -370,7 +370,7 @@
 
 	ret = srcu_notifier_chain_unregister(&service_data[service][domain].
 					     client_nb_list, client_data->nb);
-	if (IS_ERR_VALUE(ret)) {
+	if (ret < 0) {
 		pr_err("%s: srcu_notifier_chain_unregister failed, ret %d\n",
 			__func__, ret);
 		goto done;
@@ -397,7 +397,7 @@
 		client_data = list_entry(ptr,
 			struct client_data, list);
 		ret = audio_notifer_reg_client(client_data);
-		if (IS_ERR_VALUE(ret))
+		if (ret < 0)
 			pr_err("%s: audio_notifer_reg_client failed for client %s, ret %d\n",
 				__func__, client_data->client_name,
 				ret);
@@ -468,7 +468,7 @@
 	service_data[service][domain].state = notifier_opcode;
 	ret = srcu_notifier_call_chain(&service_data[service][domain].
 		client_nb_list, notifier_opcode, &data);
-	if (IS_ERR_VALUE(ret))
+	if (ret < 0)
 		pr_err("%s: srcu_notifier_call_chain returned %d, service %s, opcode 0x%lx\n",
 			__func__, ret, service_data[service][domain].name,
 			notifier_opcode);
@@ -570,7 +570,7 @@
 
 	mutex_lock(&notifier_mutex);
 	ret = audio_notifer_reg_client(client_data);
-	if (IS_ERR_VALUE(ret)) {
+	if (ret < 0) {
 		mutex_unlock(&notifier_mutex);
 		pr_err("%s: audio_notifer_reg_client for client %s failed ret = %d\n",
 			__func__, client_data->client_name,
@@ -610,7 +610,7 @@
 	int ret;
 
 	ret = audio_pdr_register(&pdr_nb);
-	if (IS_ERR_VALUE(ret)) {
+	if (ret < 0) {
 		pr_debug("%s: PDR register failed, ret = %d, disable service\n",
 			__func__, ret);
 		audio_notifer_disable_service(AUDIO_NOTIFIER_PDR_SERVICE);
diff --git a/drivers/soc/qcom/qdsp6v2/audio_pdr.c b/drivers/soc/qcom/qdsp6v2/audio_pdr.c
index 0270e1f..449d19a 100644
--- a/drivers/soc/qcom/qdsp6v2/audio_pdr.c
+++ b/drivers/soc/qcom/qdsp6v2/audio_pdr.c
@@ -112,7 +112,7 @@
 
 	ret = service_notif_unregister_notifier(
 		service_handle, nb);
-	if (IS_ERR_VALUE(ret))
+	if (ret < 0)
 		pr_err("%s: Failed to deregister service ret %d\n",
 			__func__, ret);
 done:
@@ -135,7 +135,7 @@
 		audio_pdr_services[AUDIO_PDR_DOMAIN_ADSP].client_name,
 		audio_pdr_services[AUDIO_PDR_DOMAIN_ADSP].service_name,
 		&audio_pdr_locator_nb);
-	if (IS_ERR_VALUE(ret)) {
+	if (ret < 0) {
 		pr_err("%s get_service_location failed ret %d\n",
 			__func__, ret);
 		srcu_notifier_call_chain(&audio_pdr_cb_list,
diff --git a/drivers/soc/qcom/qdsp6v2/msm_audio_ion.c b/drivers/soc/qcom/qdsp6v2/msm_audio_ion.c
index a021759..6ba20a4 100644
--- a/drivers/soc/qcom/qdsp6v2/msm_audio_ion.c
+++ b/drivers/soc/qcom/qdsp6v2/msm_audio_ion.c
@@ -207,7 +207,7 @@
 	/* name should be audio_acdb_client or Audio_Dec_Client,
 	 * bufsz should be 0 and fd shouldn't be 0 as of now
 	 */
-	*handle = ion_import_dma_buf(*client, fd);
+	*handle = ion_import_dma_buf_fd(*client, fd);
 	pr_debug("%s: DMA Buf name=%s, fd=%d handle=%pK\n", __func__,
 							name, fd, *handle);
 	if (IS_ERR_OR_NULL((void *) (*handle))) {
@@ -321,11 +321,11 @@
 				offset = 0;
 			}
 			len = min(len, remainder);
-			pr_debug("vma=%pK, addr=%x len=%ld vm_start=%x vm_end=%x vm_page_prot=%ld\n",
+			pr_debug("vma=%pK, addr=%x len=%ld vm_start=%x vm_end=%x vm_page_prot=%lu\n",
 				vma, (unsigned int)addr, len,
 				(unsigned int)vma->vm_start,
 				(unsigned int)vma->vm_end,
-				(unsigned long int)vma->vm_page_prot);
+				(unsigned long)vma->vm_page_prot.pgprot);
 			remap_pfn_range(vma, addr, page_to_pfn(page), len,
 					vma->vm_page_prot);
 			addr += len;
@@ -345,10 +345,10 @@
 			return ret;
 		}
 		pr_debug("phys=%pKK len=%zd\n", &phys_addr, phys_len);
-		pr_debug("vma=%pK, vm_start=%x vm_end=%x vm_pgoff=%ld vm_page_prot=%ld\n",
+		pr_debug("vma=%pK, vm_start=%x vm_end=%x vm_pgoff=%ld vm_page_prot=%lu\n",
 			vma, (unsigned int)vma->vm_start,
 			(unsigned int)vma->vm_end, vma->vm_pgoff,
-			(unsigned long int)vma->vm_page_prot);
+			(unsigned long)vma->vm_page_prot.pgprot);
 		va_len = vma->vm_end - vma->vm_start;
 		if ((offset > phys_len) || (va_len > phys_len-offset)) {
 			pr_err("wrong offset size %ld, lens= %zd, va_len=%zd\n",
@@ -403,7 +403,7 @@
 	 * name should be audio_acdb_client or Audio_Dec_Client,
 	 * bufsz should be 0 and fd shouldn't be 0 as of now
 	 */
-	*handle = ion_import_dma_buf(client, fd);
+	*handle = ion_import_dma_buf_fd(client, fd);
 	pr_debug("%s: DMA Buf name=%s, fd=%d handle=%pK\n", __func__,
 							name, fd, *handle);
 	if (IS_ERR_OR_NULL((void *)(*handle))) {
diff --git a/drivers/soc/qcom/wcd-dsp-glink.c b/drivers/soc/qcom/wcd-dsp-glink.c
index 3f9f229..758f627 100644
--- a/drivers/soc/qcom/wcd-dsp-glink.c
+++ b/drivers/soc/qcom/wcd-dsp-glink.c
@@ -234,7 +234,7 @@
 
 	mutex_lock(&ch->mutex);
 	rc = glink_queue_rx_intent(ch->handle, ch, req_size);
-	if (IS_ERR_VALUE(rc)) {
+	if (rc < 0) {
 		dev_err(wpriv->dev, "%s: Failed to queue rx intent, rc = %d\n",
 			__func__, rc);
 		mutex_unlock(&ch->mutex);
@@ -309,7 +309,7 @@
 				ch->ch_cfg.intents_size[i]);
 			ret = glink_queue_rx_intent(ch->handle, ch,
 						    ch->ch_cfg.intents_size[i]);
-			if (IS_ERR_VALUE(ret))
+			if (ret < 0)
 				dev_warn(wpriv->dev, "%s: Failed to queue intent %d of size %d\n",
 					 __func__, i,
 					 ch->ch_cfg.intents_size[i]);
@@ -317,7 +317,7 @@
 
 		ret = glink_qos_latency(ch->handle, ch->ch_cfg.latency_in_us,
 					QOS_PKT_SIZE);
-		if (IS_ERR_VALUE(ret))
+		if (ret < 0)
 			dev_warn(wpriv->dev, "%s: Failed to request qos %d for ch %s\n",
 				__func__, ch->ch_cfg.latency_in_us,
 				ch->ch_cfg.name);
@@ -362,7 +362,7 @@
 	mutex_lock(&wpriv->glink_mutex);
 	if (ch->handle) {
 		ret = glink_close(ch->handle);
-		if (IS_ERR_VALUE(ret)) {
+		if (ret < 0) {
 			dev_err(wpriv->dev, "%s: glink_close is failed, ret = %d\n",
 				 __func__, ret);
 		} else {
@@ -447,7 +447,7 @@
 	for (i = 0; i < wpriv->no_of_channels; i++) {
 		if (wpriv->ch && wpriv->ch[i]) {
 			ret = wdsp_glink_open_ch(wpriv->ch[i]);
-			if (IS_ERR_VALUE(ret))
+			if (ret < 0)
 				goto err_open;
 		}
 	}
@@ -627,7 +627,7 @@
 		ret = glink_tx(ch->handle, tx_buf,
 			       cpkt->payload, cpkt->payload_size,
 			       GLINK_TX_REQ_INTENT);
-		if (IS_ERR_VALUE(ret)) {
+		if (ret < 0) {
 			dev_err(wpriv->dev, "%s: glink tx failed, ret = %d\n",
 				__func__, ret);
 			/*
@@ -774,7 +774,7 @@
 	case WDSP_REG_PKT:
 		ret = wdsp_glink_ch_info_init(wpriv,
 					(struct wdsp_reg_pkt *)wpkt->payload);
-		if (IS_ERR_VALUE(ret))
+		if (ret < 0)
 			dev_err(wpriv->dev, "%s: glink register failed, ret = %d\n",
 				__func__, ret);
 		kfree(tx_buf);
@@ -1009,7 +1009,7 @@
 
 	ret = alloc_chrdev_region(&wdev->dev_num, 0, MINOR_NUMBER_COUNT,
 				  WDSP_GLINK_DRIVER_NAME);
-	if (IS_ERR_VALUE(ret)) {
+	if (ret < 0) {
 		dev_err(&pdev->dev, "%s: Failed to alloc char dev, err = %d\n",
 			__func__, ret);
 		goto err_chrdev;
@@ -1034,7 +1034,7 @@
 
 	cdev_init(&wdev->cdev, &wdsp_glink_fops);
 	ret = cdev_add(&wdev->cdev, wdev->dev_num, MINOR_NUMBER_COUNT);
-	if (IS_ERR_VALUE(ret)) {
+	if (ret < 0) {
 		dev_err(&pdev->dev, "%s: Failed to register char dev, err = %d\n",
 			__func__, ret);
 		goto err_cdev_add;
diff --git a/include/linux/msm_pcie.h b/include/linux/msm_pcie.h
new file mode 100644
index 0000000..8316aaa
--- /dev/null
+++ b/include/linux/msm_pcie.h
@@ -0,0 +1,217 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_PCIE_H
+#define __MSM_PCIE_H
+
+#include <linux/types.h>
+#include <linux/pci.h>
+
+enum msm_pcie_config {
+	MSM_PCIE_CONFIG_INVALID = 0,
+	MSM_PCIE_CONFIG_NO_CFG_RESTORE = 0x1,
+	MSM_PCIE_CONFIG_LINKDOWN = 0x2,
+	MSM_PCIE_CONFIG_NO_RECOVERY = 0x4,
+};
+
+enum msm_pcie_pm_opt {
+	MSM_PCIE_SUSPEND,
+	MSM_PCIE_RESUME,
+	MSM_PCIE_DISABLE_PC,
+	MSM_PCIE_ENABLE_PC,
+};
+
+enum msm_pcie_event {
+	MSM_PCIE_EVENT_INVALID = 0,
+	MSM_PCIE_EVENT_LINKDOWN = 0x1,
+	MSM_PCIE_EVENT_LINKUP = 0x2,
+	MSM_PCIE_EVENT_WAKEUP = 0x4,
+};
+
+enum msm_pcie_trigger {
+	MSM_PCIE_TRIGGER_CALLBACK,
+	MSM_PCIE_TRIGGER_COMPLETION,
+};
+
+struct msm_pcie_notify {
+	enum msm_pcie_event event;
+	void *user;
+	void *data;
+	u32 options;
+};
+
+struct msm_pcie_register_event {
+	u32 events;
+	void *user;
+	enum msm_pcie_trigger mode;
+	void (*callback)(struct msm_pcie_notify *notify);
+	struct msm_pcie_notify notify;
+	struct completion *completion;
+	u32 options;
+};
+
+#ifdef CONFIG_PCI_MSM
+/**
+ * msm_pcie_pm_control - control the power state of a PCIe link.
+ * @pm_opt:	power management operation
+ * @busnr:	bus number of PCIe endpoint
+ * @user:	handle of the caller
+ * @data:	private data from the caller
+ * @options:	options for pm control
+ *
+ * This function gives PCIe endpoint device drivers the control to change
+ * the power state of a PCIe link for their device.
+ *
+ * Return: 0 on success, negative value on error
+ */
+int msm_pcie_pm_control(enum msm_pcie_pm_opt pm_opt, u32 busnr, void *user,
+			void *data, u32 options);
+
+/**
+ * msm_pcie_register_event - register an event with PCIe bus driver.
+ * @reg:	event structure
+ *
+ * This function gives PCIe endpoint device drivers an option to register
+ * events with PCIe bus driver.
+ *
+ * Return: 0 on success, negative value on error
+ */
+int msm_pcie_register_event(struct msm_pcie_register_event *reg);
+
+/**
+ * msm_pcie_deregister_event - deregister an event with PCIe bus driver.
+ * @reg:	event structure
+ *
+ * This function gives PCIe endpoint device drivers an option to deregister
+ * events with PCIe bus driver.
+ *
+ * Return: 0 on success, negative value on error
+ */
+int msm_pcie_deregister_event(struct msm_pcie_register_event *reg);
+
+/**
+ * msm_pcie_recover_config - recover config space.
+ * @dev:	pci device structure
+ *
+ * This function recovers the config space of both RC and Endpoint.
+ *
+ * Return: 0 on success, negative value on error
+ */
+int msm_pcie_recover_config(struct pci_dev *dev);
+
+/**
+ * msm_pcie_enumerate - enumerate Endpoints.
+ * @rc_idx:	RC that Endpoints connect to.
+ *
+ * This function enumerates Endpoints connected to RC.
+ *
+ * Return: 0 on success, negative value on error
+ */
+int msm_pcie_enumerate(u32 rc_idx);
+
+/**
+ * msm_pcie_shadow_control - control the shadowing of PCIe config space.
+ * @dev:	pci device structure
+ * @enable:	shadowing should be enabled or disabled
+ *
+ * This function gives PCIe endpoint device drivers the control to enable
+ * or disable the shadowing of PCIe config space.
+ *
+ * Return: 0 on success, negative value on error
+ */
+int msm_pcie_shadow_control(struct pci_dev *dev, bool enable);
+
+/**
+ * msm_pcie_debug_info - run a PCIe specific debug testcase.
+ * @dev:	pci device structure
+ * @option:	specifies which PCIe debug testcase to execute
+ * @base:	PCIe specific range
+ * @offset:	offset of destination register
+ * @mask:	mask the bit(s) of destination register
+ * @value:	value to be written to destination register
+ *
+ * This function gives PCIe endpoint device drivers the control to
+ * run a debug testcase.
+ *
+ * Return: 0 on success, negative value on error
+ */
+int msm_pcie_debug_info(struct pci_dev *dev, u32 option, u32 base,
+			u32 offset, u32 mask, u32 value);
+
+/**
+ * msm_pcie_configure_sid - calculates the SID for a PCIe endpoint.
+ * @dev:	device structure
+ * @sid:	the calculated SID
+ * @domain:	the domain number of the Root Complex
+ *
+ * This function calculates the SID for a PCIe endpoint device.
+ *
+ * Return: 0 on success, negative value on error
+ */
+int msm_pcie_configure_sid(struct device *dev, u32 *sid,
+			int *domain);
+#else /* !CONFIG_PCI_MSM */
+static inline int msm_pcie_pm_control(enum msm_pcie_pm_opt pm_opt, u32 busnr,
+			void *user, void *data, u32 options)
+{
+	return -ENODEV;
+}
+
+static inline int msm_pcie_register_event(struct msm_pcie_register_event *reg)
+{
+	return -ENODEV;
+}
+
+static inline int msm_pcie_deregister_event(struct msm_pcie_register_event *reg)
+{
+	return -ENODEV;
+}
+
+static inline int msm_pcie_recover_config(struct pci_dev *dev)
+{
+	return -ENODEV;
+}
+
+static inline int msm_pcie_enumerate(u32 rc_idx)
+{
+	return -ENODEV;
+}
+
+static inline int msm_pcie_shadow_control(struct pci_dev *dev, bool enable)
+{
+	return -ENODEV;
+}
+
+static inline int msm_pcie_debug_info(struct pci_dev *dev, u32 option, u32 base,
+			u32 offset, u32 mask, u32 value)
+{
+	return -ENODEV;
+}
+
+static inline int msm_pcie_configure_sid(struct device *dev, u32 *sid,
+			int *domain)
+{
+	return -ENODEV;
+}
+#endif /* CONFIG_PCI_MSM */
+
+#endif /* __MSM_PCIE_H */
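
For reference, a minimal sketch of how an endpoint driver might consume this
API; client_pcie_cb() and client_setup() are hypothetical names, not part of
this change:

	static void client_pcie_cb(struct msm_pcie_notify *notify)
	{
		/* notify->user carries whatever cookie was registered below */
		pr_info("PCIe event 0x%x\n", notify->event);
	}

	static int client_setup(struct pci_dev *pdev)
	{
		static struct msm_pcie_register_event pcie_event;
		int ret;

		pcie_event.events = MSM_PCIE_EVENT_LINKDOWN | MSM_PCIE_EVENT_WAKEUP;
		pcie_event.user = pdev;
		pcie_event.mode = MSM_PCIE_TRIGGER_CALLBACK;
		pcie_event.callback = client_pcie_cb;

		ret = msm_pcie_register_event(&pcie_event);
		if (ret < 0)
			return ret;

		/* later, around idle windows, the link can be suspended/resumed */
		return msm_pcie_pm_control(MSM_PCIE_SUSPEND, pdev->bus->number,
					   pdev, NULL, 0);
	}
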
diff --git a/include/sound/soc.h b/include/sound/soc.h
index 1f403c3..71fd438 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -1505,6 +1505,8 @@
 void snd_soc_component_async_complete(struct snd_soc_component *component);
 int snd_soc_component_test_bits(struct snd_soc_component *component,
 	unsigned int reg, unsigned int mask, unsigned int value);
+struct snd_soc_component *soc_find_component(
+	const struct device_node *of_node, const char *name);
 
 #ifdef CONFIG_REGMAP
 
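A sketch of the kind of caller this new declaration enables: a machine driver
deferring probe until its codec component has registered. This assumes the
usual soc-core convention that soc_find_component() returns NULL when no
match is found; machine_check_codec() is a hypothetical name:

	static int machine_check_codec(struct device_node *codec_np)
	{
		struct snd_soc_component *comp;

		comp = soc_find_component(codec_np, NULL);
		if (!comp)
			return -EPROBE_DEFER;	/* codec not registered yet */

		return 0;
	}
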
diff --git a/include/uapi/Kbuild b/include/uapi/Kbuild
index 245aa6e..aeb3366 100644
--- a/include/uapi/Kbuild
+++ b/include/uapi/Kbuild
@@ -13,3 +13,4 @@
 header-y += xen/
 header-y += scsi/
 header-y += misc/
+header-y += media/
diff --git a/include/uapi/media/Kbuild b/include/uapi/media/Kbuild
new file mode 100644
index 0000000..0f34403
--- /dev/null
+++ b/include/uapi/media/Kbuild
@@ -0,0 +1,5 @@
+header-y += cam_req_mgr.h
+header-y += cam_defs.h
+header-y += cam_isp.h
+header-y += cam_isp_vfe.h
+header-y += cam_isp_ife.h
diff --git a/include/uapi/media/cam_defs.h b/include/uapi/media/cam_defs.h
new file mode 100644
index 0000000..ad2adb3
--- /dev/null
+++ b/include/uapi/media/cam_defs.h
@@ -0,0 +1,357 @@
+#ifndef __UAPI_CAM_DEFS_H__
+#define __UAPI_CAM_DEFS_H__
+
+#include <linux/videodev2.h>
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+
+/* camera op codes */
+#define CAM_COMMON_OPCODE_BASE                  0
+#define CAM_QUERY_CAP                           1
+#define CAM_ACQUIRE_DEV                         2
+#define CAM_START_DEV                           3
+#define CAM_STOP_DEV                            4
+#define CAM_CONFIG_DEV                          5
+#define CAM_RELEASE_DEV                         6
+#define CAM_COMMON_OPCODE_MAX                   7
+
+/* camera handle type */
+#define CAM_HANDLE_USER_POINTER                 1
+#define CAM_HANDLE_MEM_HANDLE                   2
+
+/**
+ * struct cam_control - struct used by ioctl control for camera
+ * @op_code:            This is the op code for camera control
+ * @size:               control command size
+ * @handle_type:        user pointer or shared memory handle
+ * @reserved:           reserved field for 64 bit alignment
+ * @handle:             control command payload
+ */
+struct cam_control {
+	uint32_t        op_code;
+	uint32_t        size;
+	uint32_t        handle_type;
+	uint32_t        reserved;
+	uint64_t        handle;
+};
+
+/* camera IOCTL */
+#define VIDIOC_CAM_CONTROL \
+	_IOWR('V', BASE_VIDIOC_PRIVATE, struct cam_control)
+
+/**
+ * struct cam_hw_version - Structure for HW version of camera devices
+ *
+ * @major    : Hardware version major
+ * @minor    : Hardware version minor
+ * @incr     : Hardware version increment
+ * @reserved : reserved for 64 bit alignment
+ */
+struct cam_hw_version {
+	uint32_t major;
+	uint32_t minor;
+	uint32_t incr;
+	uint32_t reserved;
+};
+
+/**
+ * struct cam_iommu_handle - Structure for IOMMU handles of camera hw devices
+ *
+ * @non_secure: Device Non Secure IOMMU handle
+ * @secure:     Device Secure IOMMU handle
+ *
+ */
+struct cam_iommu_handle {
+	int32_t non_secure;
+	int32_t secure;
+};
+
+/* camera secure mode */
+#define CAM_SECURE_MODE_NON_SECURE             0
+#define CAM_SECURE_MODE_SECURE                 1
+
+/* Camera Format Type */
+#define CAM_FORMAT_BASE                         0
+#define CAM_FORMAT_MIPI_RAW_6                   1
+#define CAM_FORMAT_MIPI_RAW_8                   2
+#define CAM_FORMAT_MIPI_RAW_10                  3
+#define CAM_FORMAT_MIPI_RAW_12                  4
+#define CAM_FORMAT_MIPI_RAW_14                  5
+#define CAM_FORMAT_MIPI_RAW_16                  6
+#define CAM_FORMAT_MIPI_RAW_20                  7
+#define CAM_FORMAT_QTI_RAW_8                    8
+#define CAM_FORMAT_QTI_RAW_10                   9
+#define CAM_FORMAT_QTI_RAW_12                   10
+#define CAM_FORMAT_QTI_RAW_14                   11
+#define CAM_FORMAT_PLAIN8                       12
+#define CAM_FORMAT_PLAIN16_8                    13
+#define CAM_FORMAT_PLAIN16_10                   14
+#define CAM_FORMAT_PLAIN16_12                   15
+#define CAM_FORMAT_PLAIN16_14                   16
+#define CAM_FORMAT_PLAIN16_16                   17
+#define CAM_FORMAT_PLAIN32_20                   18
+#define CAM_FORMAT_PLAIN64                      19
+#define CAM_FORMAT_PLAIN128                     20
+#define CAM_FORMAT_ARGB                         21
+#define CAM_FORMAT_ARGB_10                      22
+#define CAM_FORMAT_ARGB_12                      23
+#define CAM_FORMAT_ARGB_14                      24
+#define CAM_FORMAT_DPCM_10_6_10                 25
+#define CAM_FORMAT_DPCM_10_8_10                 26
+#define CAM_FORMAT_DPCM_12_6_12                 27
+#define CAM_FORMAT_DPCM_12_8_12                 28
+#define CAM_FORMAT_DPCM_14_8_14                 29
+#define CAM_FORMAT_DPCM_14_10_14                30
+#define CAM_FORMAT_NV21                         31
+#define CAM_FORMAT_NV12                         32
+#define CAM_FORMAT_TP10                         33
+#define CAM_FORMAT_YUV422                       34
+#define CAM_FORMAT_PD8                          35
+#define CAM_FORMAT_PD10                         36
+#define CAM_FORMAT_UBWC_NV12                    37
+#define CAM_FORMAT_UBWC_NV12_4R                 38
+#define CAM_FORMAT_UBWC_TP10                    39
+#define CAM_FORMAT_UBWC_P010                    40
+#define CAM_FORMAT_PLAIN8_SWAP                  41
+#define CAM_FORMAT_PLAIN8_10                    42
+#define CAM_FORMAT_PLAIN8_10_SWAP               43
+#define CAM_FORMAT_YV12                         44
+#define CAM_FORMAT_Y_ONLY                       45
+#define CAM_FORMAT_MAX                          46
+
+
+/* camera packet */
+
+/* camera rotation */
+#define CAM_ROTATE_CW_0_DEGREE                  0
+#define CAM_ROTATE_CW_90_DEGREE                 1
+#define CAM_ROTATE_CW_180_DEGREE                2
+#define CAM_ROTATE_CW_270_DEGREE                3
+
+/* camera Color Space */
+#define CAM_COLOR_SPACE_BASE                    0
+#define CAM_COLOR_SPACE_BT601_FULL              1
+#define CAM_COLOR_SPACE_BT601625                2
+#define CAM_COLOR_SPACE_BT601525                3
+#define CAM_COLOR_SPACE_BT709                   4
+#define CAM_COLOR_SPACE_DEPTH                   5
+#define CAM_COLOR_SPACE_MAX                     6
+
+/* camera buffer direction */
+#define CAM_BUF_INPUT                           1
+#define CAM_BUF_OUTPUT                          2
+
+/* camera packet device Type */
+#define CAM_PACKET_DEV_BASE                     0
+#define CAM_PACKET_DEV_IMG_SENSOR               1
+#define CAM_PACKET_DEV_ACTUATOR                 2
+#define CAM_PACKET_DEV_COMPANION                3
+#define CAM_PACKET_DEV_EEPROM                   4
+#define CAM_PACKET_DEV_CSIPHY                   5
+#define CAM_PACKET_DEV_OIS                      6
+#define CAM_PACKET_DEV_FLASH                    7
+#define CAM_PACKET_DEV_FD                       8
+#define CAM_PACKET_DEV_JPEG_ENC                 9
+#define CAM_PACKET_DEV_JPEG_DEC                 10
+#define CAM_PACKET_DEV_VFE                      11
+#define CAM_PACKET_DEV_CPP                      12
+#define CAM_PACKET_DEV_CSID                     13
+#define CAM_PACKET_DEV_ISPIF                    14
+#define CAM_PACKET_DEV_IFE                      15
+#define CAM_PACKET_DEV_ICP                      16
+#define CAM_PACKET_DEV_LRME                     17
+#define CAM_PACKET_DEV_MAX                      18
+
+
+/* constants */
+#define CAM_PACKET_MAX_PLANES                   3
+
+/**
+ * struct cam_plane_cfg - plane configuration info
+ *
+ * @width:                      plane width in pixels
+ * @height:                     plane height in lines
+ * @plane_stride:               plane stride in pixels
+ * @slice_height:               slice height in lines (not used by ISP)
+ *
+ */
+struct cam_plane_cfg {
+	uint32_t                width;
+	uint32_t                height;
+	uint32_t                plane_stride;
+	uint32_t                slice_height;
+};
+
+/**
+ * struct cam_buf_io_cfg - Buffer io configuration for buffers
+ *
+ * @mem_handle:                 mem_handle array for the buffers.
+ * @offsets:                    offsets for each plane in the buffer
+ * @planes:                     per plane information
+ * @width:                      main plane width in pixel
+ * @height:                     main plane height in lines
+ * @format:                     format of the buffer
+ * @color_space:                color space for the buffer
+ * @color_pattern:              color pattern in the buffer
+ * @bpp:                        bits per pixel
+ * @rotation:                   rotation information for the buffer
+ * @resource_type:              resource type associated with the buffer
+ * @fence:                      fence handle
+ * @cmd_buf_index:              command buffer index to patch the buffer info
+ * @cmd_buf_offset:             offset within the command buffer to patch
+ * @flag:                       flags for extra information
+ * @direction:                  buffer direction: input or output
+ * @padding:                    padding for the structure
+ *
+ */
+struct cam_buf_io_cfg {
+	int32_t                         mem_handle[CAM_PACKET_MAX_PLANES];
+	uint32_t                        offsets[CAM_PACKET_MAX_PLANES];
+	struct cam_plane_cfg            planes[CAM_PACKET_MAX_PLANES];
+	uint32_t                        width;
+	uint32_t                        height;
+	uint32_t                        format;
+	uint32_t                        color_space;
+	uint32_t                        color_pattern;
+	uint32_t                        bpp;
+	uint32_t                        rotation;
+	uint32_t                        resource_type;
+	int32_t                         fence;
+	uint32_t                        cmd_buf_index;
+	uint32_t                        cmd_buf_offset;
+	uint32_t                        flag;
+	uint32_t                        direction;
+	uint32_t                        padding;
+};
+
+/**
+ * struct cam_cmd_buf_desc - Command buffer descriptor
+ *
+ * @mem_handle:                 command buffer handle
+ * @offset:                     command start offset
+ * @size:                       size of the command buffer in bytes
+ * @length:                     used memory in command buffer in bytes
+ * @type:                       type of the command buffer
+ * @meta_data:                  data type for private command buffer
+ *                              between UMD and KMD
+ *
+ */
+struct cam_cmd_buf_desc {
+	int32_t                 mem_handle;
+	uint32_t                offset;
+	uint32_t                size;
+	uint32_t                length;
+	uint32_t                type;
+	uint32_t                meta_data;
+};
+
+/**
+ * struct cam_packet_header - camera packet header
+ *
+ * @op_code:                    camera packet opcode
+ * @dev_type:                   camera packet device type
+ * @size:                       size of the camera packet in bytes
+ * @flags:                      flags for the camera packet
+ * @request_id:                 request id for this camera packet
+ *
+ */
+struct cam_packet_header {
+	uint32_t                op_code;
+	uint32_t                dev_type;
+	uint32_t                size;
+	uint32_t                flags;
+	uint64_t                request_id;
+};
+
+/**
+ * struct cam_patch_desc - Patch structure
+ *
+ * @dst_buf_hdl:                memory handle for the dest buffer
+ * @dst_offset:                 offset byte in the dest buffer
+ * @src_buf_hdl:                memory handle for the source buffer
+ * @src_offset:                 offset byte in the source buffer
+ *
+ */
+struct cam_patch_desc {
+	int32_t                 dst_buf_hdl;
+	uint32_t                dst_offset;
+	int32_t                 src_buf_hdl;
+	uint32_t                src_offset;
+};
+
+/**
+ * struct cam_packet - cam packet structure
+ *
+ * @header:                     camera packet header
+ * @cmd_buf_offset:             command buffer start offset
+ * @num_cmd_buf:                number of command buffers in the packet
+ * @io_configs_offset:          buffer IO configuration start offset
+ * @num_io_configs:             number of buffer IO configurations
+ * @patch_offset:               start offset of the patch structures
+ * @num_patches:                number of patch structures
+ * @kmd_cmd_buf_index:          command buffer index which contains extra
+ *                                      space for the KMD buffer
+ * @kmd_cmd_buf_offset:         offset from the beginning of the command
+ *                                      buffer for KMD usage.
+ * @payload:                    camera packet payload
+ *
+ */
+struct cam_packet {
+	struct cam_packet_header        header;
+	uint32_t                        cmd_buf_offset;
+	uint32_t                        num_cmd_buf;
+	uint32_t                        io_configs_offset;
+	uint32_t                        num_io_configs;
+	uint32_t                        patch_offset;
+	uint32_t                        num_patches;
+	uint32_t                        kmd_cmd_buf_index;
+	uint32_t                        kmd_cmd_buf_offset;
+	uint64_t                        payload[1];
+
+};
+
+/* Release Device */
+/**
+ * struct cam_release_dev_cmd - control payload for release devices
+ *
+ * @session_handle:             session handle for the release
+ * @dev_handle:                 device handle for the release
+ */
+struct cam_release_dev_cmd {
+	int32_t                 session_handle;
+	int32_t                 dev_handle;
+};
+
+/* Start/Stop device */
+/**
+ * struct cam_start_stop_dev_cmd - control payload for start/stop device
+ *
+ * @session_handle:             session handle for the start/stop command
+ * @dev_handle:                 device handle for the start/stop command
+ *
+ */
+struct cam_start_stop_dev_cmd {
+	int32_t                 session_handle;
+	int32_t                 dev_handle;
+};
+
+/* Configure Device */
+/**
+ * struct cam_config_dev_cmd - command payload for configure device
+ *
+ * @session_handle:             session handle for the command
+ * @dev_handle:                 device handle for the command
+ * @offset:                     offset byte in the packet handle.
+ * @packet_handle:              packet memory handle for the actual packet:
+ *                                      struct cam_packet.
+ *
+ */
+struct cam_config_dev_cmd {
+	int32_t                 session_handle;
+	int32_t                 dev_handle;
+	uint64_t                offset;
+	uint64_t                packet_handle;
+};
+
+#endif /* __UAPI_CAM_DEFS_H__ */
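
A sketch of how user space might drive this interface, issuing CAM_STOP_DEV
through the single VIDIOC_CAM_CONTROL ioctl; fd is assumed to be an opened
camera device node and cam_stop() is a hypothetical helper:

	#include <stdint.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <media/cam_defs.h>

	static int cam_stop(int fd, int32_t session_hdl, int32_t dev_hdl)
	{
		struct cam_start_stop_dev_cmd stop = {
			.session_handle = session_hdl,
			.dev_handle = dev_hdl,
		};
		struct cam_control ctrl;

		memset(&ctrl, 0, sizeof(ctrl));
		ctrl.op_code = CAM_STOP_DEV;
		ctrl.size = sizeof(stop);
		ctrl.handle_type = CAM_HANDLE_USER_POINTER;
		ctrl.handle = (uint64_t)(uintptr_t)&stop;

		return ioctl(fd, VIDIOC_CAM_CONTROL, &ctrl);
	}
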
diff --git a/include/uapi/media/cam_isp.h b/include/uapi/media/cam_isp.h
new file mode 100644
index 0000000..dc8268c
--- /dev/null
+++ b/include/uapi/media/cam_isp.h
@@ -0,0 +1,235 @@
+#ifndef __UAPI_CAM_ISP_H__
+#define __UAPI_CAM_ISP_H__
+
+#include "cam_defs.h"
+#include "cam_isp_vfe.h"
+#include "cam_isp_ife.h"
+
+
+/* ISP driver name */
+#define CAM_ISP_DEV_NAME                        "cam-isp"
+
+/* HW type */
+#define CAM_ISP_HW_BASE                         0
+#define CAM_ISP_HW_CSID                         1
+#define CAM_ISP_HW_VFE                          2
+#define CAM_ISP_HW_IFE                          3
+#define CAM_ISP_HW_ISPIF                        4
+#define CAM_ISP_HW_MAX                          5
+
+/* Color Pattern */
+#define CAM_ISP_PATTERN_BAYER_RGRGRG            0
+#define CAM_ISP_PATTERN_BAYER_GRGRGR            1
+#define CAM_ISP_PATTERN_BAYER_BGBGBG            2
+#define CAM_ISP_PATTERN_BAYER_GBGBGB            3
+#define CAM_ISP_PATTERN_YUV_YCBYCR              4
+#define CAM_ISP_PATTERN_YUV_YCRYCB              5
+#define CAM_ISP_PATTERN_YUV_CBYCRY              6
+#define CAM_ISP_PATTERN_YUV_CRYCBY              7
+#define CAM_ISP_PATTERN_MAX                     8
+
+/* Usage Type */
+#define CAM_ISP_RES_USAGE_SINGLE                0
+#define CAM_ISP_RES_USAGE_DUAL                  1
+#define CAM_ISP_RES_USAGE_MAX                   2
+
+/* Resource ID */
+#define CAM_ISP_RES_ID_PORT                     0
+#define CAM_ISP_RES_ID_CLK                      1
+#define CAM_ISP_RES_ID_MAX                      2
+
+/* Resource Type - Type of resource for the resource id
+ * defined in cam_isp_vfe.h, cam_isp_ife.h
+ */
+
+/* Lane Type in input resource for Port */
+#define CAM_ISP_LANE_TYPE_DPHY                  0
+#define CAM_ISP_LANE_TYPE_CPHY                  1
+#define CAM_ISP_LANE_TYPE_MAX                   2
+
+/* ISP Resource Composite Group ID */
+#define CAM_ISP_RES_COMP_GROUP_NONE             0
+#define CAM_ISP_RES_COMP_GROUP_ID_0             1
+#define CAM_ISP_RES_COMP_GROUP_ID_1             2
+#define CAM_ISP_RES_COMP_GROUP_ID_2             3
+#define CAM_ISP_RES_COMP_GROUP_ID_3             4
+#define CAM_ISP_RES_COMP_GROUP_ID_4             5
+#define CAM_ISP_RES_COMP_GROUP_ID_5             6
+#define CAM_ISP_RES_COMP_GROUP_ID_MAX           7
+
+/* ISP packet opcode for ISP */
+#define CAM_ISP_PACKET_OP_BASE                  0
+#define CAM_ISP_PACKET_INIT_DEV                 1
+#define CAM_ISP_PACKET_UPDATE_DEV               2
+#define CAM_ISP_PACKET_OP_MAX                   3
+
+/* ISP packet meta_data type for command buffer */
+#define CAM_ISP_PACKET_META_BASE                0
+#define CAM_ISP_PACKET_META_LEFT                1
+#define CAM_ISP_PACKET_META_RIGHT               2
+#define CAM_ISP_PACKET_META_COMMON              3
+#define CAM_ISP_PACKET_META_DMI_LEFT            4
+#define CAM_ISP_PACKET_META_DMI_RIGHT           5
+#define CAM_ISP_PACKET_META_DMI_COMMON          6
+#define CAM_ISP_PACKET_META_CLOCK               7
+#define CAM_ISP_PACKET_META_CSID                8
+#define CAM_ISP_PACKET_META_MAX                 9
+
+
+/* Query devices */
+/**
+ * struct cam_isp_dev_cap_info - A cap info for particular hw type
+ *
+ * @hw_type:            Hardware type for the cap info
+ * @hw_version:         Hardware version
+ *
+ */
+struct cam_isp_dev_cap_info {
+	uint32_t              hw_type;
+	struct cam_hw_version hw_version;
+};
+
+/**
+ * struct cam_isp_query_cap_cmd - ISP query device capability payload
+ *
+ * @device_iommu:               returned iommu handles for device
+ * @cdm_iommu:                  returned iommu handles for cdm
+ * @num_dev:                    returned number of device capabilities
+ * @reserved:                   reserved field for alignment
+ * @dev_caps:                   returned device capability array
+ *
+ */
+struct cam_isp_query_cap_cmd {
+	struct cam_iommu_handle       device_iommu;
+	struct cam_iommu_handle       cdm_iommu;
+	int32_t                       num_dev;
+	uint32_t                      reserved;
+	struct cam_isp_dev_cap_info   dev_caps[CAM_ISP_HW_MAX];
+};
+
+/* Acquire Device */
+/**
+ * struct cam_isp_out_port_info - An output port resource info
+ *
+ * @res_type:                   output resource type defined in file
+ *                              cam_isp_vfe.h or cam_isp_ife.h
+ * @format:                     output format of the resource
+ * @width:                      output width in pixels
+ * @height:                     output height in lines
+ * @comp_grp_id:                composite group id for the resource.
+ * @split_point:                split point in pixels for the dual VFE.
+ * @secure_mode:                flag to tell if output should be run in secure
+ *                              mode or not. See cam_defs.h for definition
+ * @reserved:                   reserved field for alignment
+ *
+ */
+struct cam_isp_out_port_info {
+	uint32_t                res_type;
+	uint32_t                format;
+	uint32_t                width;
+	uint32_t                height;
+	uint32_t                comp_grp_id;
+	uint32_t                split_point;
+	uint32_t                secure_mode;
+	uint32_t                reserved;
+};
+
+/**
+ * struct cam_isp_in_port_info - An input port resource info
+ *
+ * @res_type:                   input resource type defined in file
+ *                              cam_isp_vfe.h or cam_isp_ife.h
+ * @lane_type:                  lane type: c-phy or d-phy.
+ * @lane_num:                   active lane number
+ * @lane_cfg:                   lane configurations: 4 bits per lane
+ * @vc:                         input virtual channel number
+ * @dt:                         input data type number
+ * @format:                     input format
+ * @test_pattern:               test pattern for the testgen
+ * @usage_type:                 whether dual vfe is required
+ * @left_start:                 left input start offset in pixels
+ * @left_stop:                  left input stop offset in pixels
+ * @left_width:                 left input width in pixels
+ * @right_start:                right input start offset in pixels.
+ *                              Only for Dual VFE
+ * @right_stop:                 right input stop offset in pixels.
+ *                              Only for Dual VFE
+ * @right_width:                right input width in pixels.
+ *                              Only for dual VFE
+ * @line_start:                 top line number of the input
+ * @line_stop:                  bottom line number of the input
+ * @height:                     input height in lines
+ * @pixel_clk:                  sensor output clock
+ * @batch_size:                 batch size for HFR mode
+ * @reserved:                   reserved field for alignment
+ * @num_out_res:                number of the output resource associated
+ * @data:                       payload that contains the output resources
+ *
+ */
+struct cam_isp_in_port_info {
+	uint32_t                        res_type;
+	uint32_t                        lane_type;
+	uint32_t                        lane_num;
+	uint32_t                        lane_cfg;
+	uint32_t                        vc;
+	uint32_t                        dt;
+	uint32_t                        format;
+	uint32_t                        test_pattern;
+	uint32_t                        usage_type;
+	uint32_t                        left_start;
+	uint32_t                        left_stop;
+	uint32_t                        left_width;
+	uint32_t                        right_start;
+	uint32_t                        right_stop;
+	uint32_t                        right_width;
+	uint32_t                        line_start;
+	uint32_t                        line_stop;
+	uint32_t                        height;
+	uint32_t                        pixel_clk;
+	uint32_t                        batch_size;
+	uint32_t                        reserved;
+	uint32_t                        num_out_res;
+	struct cam_isp_out_port_info    data[1];
+};
+
+/**
+ * struct cam_isp_resource - A resource bundle
+ *
+ * @resource_id:                resource id for the resource bundle
+ * @length:                     length of the whole resource blob
+ * @handle_type:                type of the resource handle
+ * @reserved:                   reserved field for alignment
+ * @res_hdl:                    resource handle that points to the
+ *                                     resource array
+ *
+ */
+struct cam_isp_resource {
+	uint32_t                       resource_id;
+	uint32_t                       length;
+	uint32_t                       handle_type;
+	uint32_t                       reserved;
+	uint64_t                       res_hdl;
+};
+
+/**
+ * struct cam_isp_acquire_dev_cmd - control payload for acquire devices
+ *
+ * @session_handle:             session handle for the acquire command
+ * @dev_handle:                 device handle to be returned
+ * @num_resources:              number of the resources to be acquired
+ * @handle_type:                resource handle type:
+ *                                      1 = user pointer, 2 = mem handle
+ * @resource_hdl:               resource handle that refers to the actual
+ *                                      resource array. Each item in this
+ *                                      array is struct cam_isp_resource
+ *
+ */
+struct cam_isp_acquire_dev_cmd {
+	int32_t                 session_handle;
+	int32_t                 dev_handle;
+	uint32_t                num_resources;
+	uint32_t                handle_type;
+	uint64_t                resource_hdl;
+};
+
+#endif /* __UAPI_CAM_ISP_H__ */
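
A sketch of consuming the query-cap payload after a CAM_QUERY_CAP call has
filled it in; isp_print_caps() is a hypothetical helper:

	static void isp_print_caps(const struct cam_isp_query_cap_cmd *caps)
	{
		int32_t i;

		for (i = 0; i < caps->num_dev && i < CAM_ISP_HW_MAX; i++)
			pr_info("ISP hw type %u, version %u.%u.%u\n",
				caps->dev_caps[i].hw_type,
				caps->dev_caps[i].hw_version.major,
				caps->dev_caps[i].hw_version.minor,
				caps->dev_caps[i].hw_version.incr);
	}
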
diff --git a/include/uapi/media/cam_isp_ife.h b/include/uapi/media/cam_isp_ife.h
new file mode 100644
index 0000000..f5e7281
--- /dev/null
+++ b/include/uapi/media/cam_isp_ife.h
@@ -0,0 +1,39 @@
+#ifndef __UAPI_CAM_ISP_IFE_H__
+#define __UAPI_CAM_ISP_IFE_H__
+
+/* IFE output port resource type (globally unique) */
+#define CAM_ISP_IFE_OUT_RES_BASE               0x3000
+
+#define CAM_ISP_IFE_OUT_RES_FULL               (CAM_ISP_IFE_OUT_RES_BASE + 0)
+#define CAM_ISP_IFE_OUT_RES_DS4                (CAM_ISP_IFE_OUT_RES_BASE + 1)
+#define CAM_ISP_IFE_OUT_RES_DS16               (CAM_ISP_IFE_OUT_RES_BASE + 2)
+#define CAM_ISP_IFE_OUT_RES_RAW_DUMP           (CAM_ISP_IFE_OUT_RES_BASE + 3)
+#define CAM_ISP_IFE_OUT_RES_FD                 (CAM_ISP_IFE_OUT_RES_BASE + 4)
+#define CAM_ISP_IFE_OUT_RES_PDAF               (CAM_ISP_IFE_OUT_RES_BASE + 5)
+#define CAM_ISP_IFE_OUT_RES_RDI_0              (CAM_ISP_IFE_OUT_RES_BASE + 6)
+#define CAM_ISP_IFE_OUT_RES_RDI_1              (CAM_ISP_IFE_OUT_RES_BASE + 7)
+#define CAM_ISP_IFE_OUT_RES_RDI_2              (CAM_ISP_IFE_OUT_RES_BASE + 8)
+#define CAM_ISP_IFE_OUT_RES_RDI_3              (CAM_ISP_IFE_OUT_RES_BASE + 9)
+#define CAM_ISP_IFE_OUT_RES_STATS_HDR_BE       (CAM_ISP_IFE_OUT_RES_BASE + 10)
+#define CAM_ISP_IFE_OUT_RES_STATS_HDR_BHIST    (CAM_ISP_IFE_OUT_RES_BASE + 11)
+#define CAM_ISP_IFE_OUT_RES_STATS_TL_BG        (CAM_ISP_IFE_OUT_RES_BASE + 12)
+#define CAM_ISP_IFE_OUT_RES_STATS_BF           (CAM_ISP_IFE_OUT_RES_BASE + 13)
+#define CAM_ISP_IFE_OUT_RES_STATS_AWB_BG       (CAM_ISP_IFE_OUT_RES_BASE + 14)
+#define CAM_ISP_IFE_OUT_RES_STATS_BHIST        (CAM_ISP_IFE_OUT_RES_BASE + 15)
+#define CAM_ISP_IFE_OUT_RES_STATS_RS           (CAM_ISP_IFE_OUT_RES_BASE + 16)
+#define CAM_ISP_IFE_OUT_RES_STATS_CS           (CAM_ISP_IFE_OUT_RES_BASE + 17)
+#define CAM_ISP_IFE_OUT_RES_STATS_IHIST        (CAM_ISP_IFE_OUT_RES_BASE + 18)
+#define CAM_ISP_IFE_OUT_RES_MAX                (CAM_ISP_IFE_OUT_RES_BASE + 19)
+
+
+/* IFE input port resource type (globally unique) */
+#define CAM_ISP_IFE_IN_RES_BASE                 0x4000
+
+#define CAM_ISP_IFE_IN_RES_TPG                 (CAM_ISP_IFE_IN_RES_BASE + 0)
+#define CAM_ISP_IFE_IN_RES_PHY_0               (CAM_ISP_IFE_IN_RES_BASE + 1)
+#define CAM_ISP_IFE_IN_RES_PHY_1               (CAM_ISP_IFE_IN_RES_BASE + 2)
+#define CAM_ISP_IFE_IN_RES_PHY_2               (CAM_ISP_IFE_IN_RES_BASE + 3)
+#define CAM_ISP_IFE_IN_RES_PHY_3               (CAM_ISP_IFE_IN_RES_BASE + 4)
+#define CAM_ISP_IFE_IN_RES_MAX                 (CAM_ISP_IFE_IN_RES_BASE + 5)
+
+#endif /* __UAPI_CAM_ISP_IFE_H__ */
diff --git a/include/uapi/media/cam_isp_vfe.h b/include/uapi/media/cam_isp_vfe.h
new file mode 100644
index 0000000..e48db2f
--- /dev/null
+++ b/include/uapi/media/cam_isp_vfe.h
@@ -0,0 +1,44 @@
+#ifndef __UAPI_CAM_ISP_VFE_H__
+#define __UAPI_CAM_ISP_VFE_H__
+
+/* VFE output port resource type (globally unique) */
+#define CAM_ISP_VFE_OUT_RES_BASE               0x1000
+
+#define CAM_ISP_VFE_OUT_RES_ENC                (CAM_ISP_VFE_OUT_RES_BASE + 0)
+#define CAM_ISP_VFE_OUT_RES_VIEW               (CAM_ISP_VFE_OUT_RES_BASE + 1)
+#define CAM_ISP_VFE_OUT_RES_VID                (CAM_ISP_VFE_OUT_RES_BASE + 2)
+#define CAM_ISP_VFE_OUT_RES_RDI_0              (CAM_ISP_VFE_OUT_RES_BASE + 3)
+#define CAM_ISP_VFE_OUT_RES_RDI_1              (CAM_ISP_VFE_OUT_RES_BASE + 4)
+#define CAM_ISP_VFE_OUT_RES_RDI_2              (CAM_ISP_VFE_OUT_RES_BASE + 5)
+#define CAM_ISP_VFE_OUT_RES_RDI_3              (CAM_ISP_VFE_OUT_RES_BASE + 6)
+#define CAM_ISP_VFE_OUT_RES_STATS_AEC          (CAM_ISP_VFE_OUT_RES_BASE + 7)
+#define CAM_ISP_VFE_OUT_RES_STATS_AF           (CAM_ISP_VFE_OUT_RES_BASE + 8)
+#define CAM_ISP_VFE_OUT_RES_STATS_AWB          (CAM_ISP_VFE_OUT_RES_BASE + 9)
+#define CAM_ISP_VFE_OUT_RES_STATS_RS           (CAM_ISP_VFE_OUT_RES_BASE + 10)
+#define CAM_ISP_VFE_OUT_RES_STATS_CS           (CAM_ISP_VFE_OUT_RES_BASE + 11)
+#define CAM_ISP_VFE_OUT_RES_STATS_IHIST        (CAM_ISP_VFE_OUT_RES_BASE + 12)
+#define CAM_ISP_VFE_OUT_RES_STATS_SKIN         (CAM_ISP_VFE_OUT_RES_BASE + 13)
+#define CAM_ISP_VFE_OUT_RES_STATS_BG           (CAM_ISP_VFE_OUT_RES_BASE + 14)
+#define CAM_ISP_VFE_OUT_RES_STATS_BF           (CAM_ISP_VFE_OUT_RES_BASE + 15)
+#define CAM_ISP_VFE_OUT_RES_STATS_BE           (CAM_ISP_VFE_OUT_RES_BASE + 16)
+#define CAM_ISP_VFE_OUT_RES_STATS_BHIST        (CAM_ISP_VFE_OUT_RES_BASE + 17)
+#define CAM_ISP_VFE_OUT_RES_STATS_BF_SCALE     (CAM_ISP_VFE_OUT_RES_BASE + 18)
+#define CAM_ISP_VFE_OUT_RES_STATS_HDR_BE       (CAM_ISP_VFE_OUT_RES_BASE + 19)
+#define CAM_ISP_VFE_OUT_RES_STATS_HDR_BHIST    (CAM_ISP_VFE_OUT_RES_BASE + 20)
+#define CAM_ISP_VFE_OUT_RES_STATS_AEC_BG       (CAM_ISP_VFE_OUT_RES_BASE + 21)
+#define CAM_ISP_VFE_OUT_RES_CAMIF_RAW          (CAM_ISP_VFE_OUT_RES_BASE + 22)
+#define CAM_ISP_VFE_OUT_RES_IDEAL_RAW          (CAM_ISP_VFE_OUT_RES_BASE + 23)
+#define CAM_ISP_VFE_OUT_RES_MAX                (CAM_ISP_VFE_OUT_RES_BASE + 24)
+
+/* VFE input port resource type (globally unique) */
+#define CAM_ISP_VFE_IN_RES_BASE                0x2000
+
+#define CAM_ISP_VFE_IN_RES_TPG                 (CAM_ISP_VFE_IN_RES_BASE + 0)
+#define CAM_ISP_VFE_IN_RES_PHY_0               (CAM_ISP_VFE_IN_RES_BASE + 1)
+#define CAM_ISP_VFE_IN_RES_PHY_1               (CAM_ISP_VFE_IN_RES_BASE + 2)
+#define CAM_ISP_VFE_IN_RES_PHY_2               (CAM_ISP_VFE_IN_RES_BASE + 3)
+#define CAM_ISP_VFE_IN_RES_PHY_3               (CAM_ISP_VFE_IN_RES_BASE + 4)
+#define CAM_ISP_VFE_IN_RES_FE                  (CAM_ISP_VFE_IN_RES_BASE + 5)
+#define CAM_ISP_VFE_IN_RES_MAX                 (CAM_ISP_VFE_IN_RES_BASE + 6)
+
+#endif /* __UAPI_CAM_ISP_VFE_H__ */
diff --git a/include/uapi/media/cam_req_mgr.h b/include/uapi/media/cam_req_mgr.h
new file mode 100644
index 0000000..18bd04a
--- /dev/null
+++ b/include/uapi/media/cam_req_mgr.h
@@ -0,0 +1,327 @@
+#ifndef __UAPI_LINUX_CAM_REQ_MGR_H
+#define __UAPI_LINUX_CAM_REQ_MGR_H
+
+#include <linux/videodev2.h>
+#include <linux/types.h>
+#include <linux/ioctl.h>
+#include <linux/media.h>
+#include <media/cam_defs.h>
+
+#define CAM_REQ_MGR_VNODE_NAME "cam-req-mgr-devnode"
+
+#define CAM_DEVICE_TYPE_BASE    (MEDIA_ENT_F_OLD_BASE)
+#define CAM_VNODE_DEVICE_TYPE   (CAM_DEVICE_TYPE_BASE)
+#define CAM_SENSOR_DEVICE_TYPE  (CAM_DEVICE_TYPE_BASE + 1)
+#define CAM_IFE_DEVICE_TYPE     (CAM_DEVICE_TYPE_BASE + 2)
+#define CAM_ICP_DEVICE_TYPE     (CAM_DEVICE_TYPE_BASE + 3)
+#define CAM_LRME_DEVICE_TYPE    (CAM_DEVICE_TYPE_BASE + 4)
+#define CAM_JPEG_DEVICE_TYPE    (CAM_DEVICE_TYPE_BASE + 5)
+#define CAM_FD_DEVICE_TYPE      (CAM_DEVICE_TYPE_BASE + 6)
+#define CAM_CPAS_DEVICE_TYPE    (CAM_DEVICE_TYPE_BASE + 7)
+#define CAM_CSIPHY_DEVICE_TYPE  (CAM_DEVICE_TYPE_BASE + 8)
+
+/* cam_req_mgr hdl info */
+#define CAM_REQ_MGR_HDL_IDX_POS           8
+#define CAM_REQ_MGR_HDL_IDX_MASK          ((1 << CAM_REQ_MGR_HDL_IDX_POS) - 1)
+#define CAM_REQ_MGR_GET_HDL_IDX(hdl)      (hdl & CAM_REQ_MGR_HDL_IDX_MASK)
+
+/**
+ * Max handles supported by cam_req_mgr
+ * It includes both session and device handles
+ */
+#define CAM_REQ_MGR_MAX_HANDLES           64
+#define MAX_LINKS_PER_SESSION             2
+
+/* V4L event type which user space will subscribe to */
+#define V4L_EVENT_CAM_REQ_MGR_EVENT       (V4L2_EVENT_PRIVATE_START + 0)
+
+/* Specific event ids to get notified in user space */
+#define V4L_EVENT_CAM_REQ_MGR_SOF         0
+#define V4L_EVENT_CAM_REQ_MGR_ERROR       1
+#define V4L_EVENT_CAM_REQ_MGR_MAX         2
+
+/**
+ * Request Manager : flush_type
+ * @CAM_REQ_MGR_FLUSH_TYPE_ALL: Req mgr will remove all the pending
+ * requests from input/processing queue.
+ * @CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ: Req mgr will remove only particular
+ * request id from input/processing queue.
+ * @CAM_REQ_MGR_FLUSH_TYPE_MAX: Max number of the flush type
+ * @opcode: CAM_REQ_MGR_FLUSH_REQ
+ */
+#define CAM_REQ_MGR_FLUSH_TYPE_ALL          0
+#define CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ   1
+#define CAM_REQ_MGR_FLUSH_TYPE_MAX          2
+
+/**
+ * struct cam_req_mgr_event_data
+ * @session_hdl: session handle
+ * @link_hdl: link handle
+ * @frame_id: frame id
+ * @reserved: reserved for 64 bit alignment
+ * @req_id: request id
+ * @tv_sec: timestamp in seconds
+ * @tv_usec: timestamp in micro seconds
+ */
+struct cam_req_mgr_event_data {
+	int32_t   session_hdl;
+	int32_t   link_hdl;
+	int32_t   frame_id;
+	int32_t   reserved;
+	int64_t   req_id;
+	uint64_t  tv_sec;
+	uint64_t  tv_usec;
+};
+
+/**
+ * struct cam_req_mgr_session_info
+ * @session_hdl: In/Output param - session_handle
+ * @opcode1: CAM_REQ_MGR_CREATE_SESSION
+ * @opcode2: CAM_REQ_MGR_DESTROY_SESSION
+ */
+struct cam_req_mgr_session_info {
+	int32_t session_hdl;
+	int32_t reserved;
+};
+
+/**
+ * struct cam_req_mgr_link_info
+ * @session_hdl: Input param - Identifier for CSL session
+ * @num_devices: Input Param - Num of devices to be linked
+ * @dev_hdls: Input param - List of device handles to be linked
+ * @link_hdl: Output Param -Identifier for link
+ * @opcode: CAM_REQ_MGR_LINK
+ */
+struct cam_req_mgr_link_info {
+	int32_t session_hdl;
+	uint32_t num_devices;
+	int32_t dev_hdls[CAM_REQ_MGR_MAX_HANDLES];
+	int32_t link_hdl;
+};
+
+/**
+ * struct cam_req_mgr_unlink_info
+ * @session_hdl: input param - session handle
+ * @link_hdl: input param - link handle
+ * @opcode: CAM_REQ_MGR_UNLINK
+ */
+struct cam_req_mgr_unlink_info {
+	int32_t session_hdl;
+	int32_t link_hdl;
+};
+
+/**
+ * struct cam_req_mgr_flush_info
+ * @brief: User can tell drivers to flush a particular request id or
+ * flush all requests from its pending processing queue. Flush is a
+ * blocking call and driver shall ensure all requests are flushed
+ * before returning.
+ * @session_hdl: Input param - Identifier for CSL session
+ * @link_hdl: Input Param -Identifier for link
+ * @flush_type: User can cancel a particular req id or can flush
+ * all requests in queue
+ * @reserved: reserved for 64 bit alignment
+ * @req_id: field is valid only if flush type is cancel request
+ * for flush all this field value is not considered.
+ * @opcode: CAM_REQ_MGR_FLUSH_REQ
+ */
+struct cam_req_mgr_flush_info {
+	int32_t session_hdl;
+	int32_t link_hdl;
+	uint32_t flush_type;
+	uint32_t reserved;
+	int64_t req_id;
+};
+
+/**
+ * struct cam_req_mgr_sched_request
+ * @session_hdl: Input param - Identifier for CSL session
+ * @link_hdl: Input Param - Identifier for link
+ * @bubble_enable: Input Param - Cam req mgr will do bubble recovery if this
+ * flag is set.
+ * @reserved: reserved field for alignment
+ * @req_id: Input Param - Request id to be scheduled
+ */
+struct cam_req_mgr_sched_request {
+	int32_t session_hdl;
+	int32_t link_hdl;
+	int32_t bubble_enable;
+	int32_t reserved;
+	int64_t req_id;
+};
+
+/**
+ * struct cam_req_mgr_sync_mode
+ * @session_hdl:         Input param - Identifier for CSL session
+ * @sync_enable:         Input Param -Enable sync mode or disable
+ * @num_links:           Input Param - Num of links in sync mode (Valid only
+ *                             when sync_enable is TRUE)
+ * @link_hdls:           Input Param - Array of link handles to be in sync mode
+ *                             (Valid only when sync_enable is TRUE)
+ * @master_link_hdl:     Input Param - To dictate which link's SOF drives system
+ *                             (Valid only when sync_enable is TRUE)
+ *
+ * @opcode: CAM_REQ_MGR_SYNC_MODE
+ */
+struct cam_req_mgr_sync_mode {
+	int32_t session_hdl;
+	int32_t sync_enable;
+	int32_t num_links;
+	int32_t link_hdls[MAX_LINKS_PER_SESSION];
+	int32_t master_link_hdl;
+	int32_t reserved;
+};
+
+/**
+ * cam_req_mgr specific opcode ids
+ */
+#define CAM_REQ_MGR_CREATE_DEV_NODES            (CAM_COMMON_OPCODE_MAX + 1)
+#define CAM_REQ_MGR_CREATE_SESSION              (CAM_COMMON_OPCODE_MAX + 2)
+#define CAM_REQ_MGR_DESTROY_SESSION             (CAM_COMMON_OPCODE_MAX + 3)
+#define CAM_REQ_MGR_LINK                        (CAM_COMMON_OPCODE_MAX + 4)
+#define CAM_REQ_MGR_UNLINK                      (CAM_COMMON_OPCODE_MAX + 5)
+#define CAM_REQ_MGR_SCHED_REQ                   (CAM_COMMON_OPCODE_MAX + 6)
+#define CAM_REQ_MGR_FLUSH_REQ                   (CAM_COMMON_OPCODE_MAX + 7)
+#define CAM_REQ_MGR_SYNC_MODE                   (CAM_COMMON_OPCODE_MAX + 8)
+#define CAM_REQ_MGR_ALLOC_BUF                   (CAM_COMMON_OPCODE_MAX + 9)
+#define CAM_REQ_MGR_MAP_BUF                     (CAM_COMMON_OPCODE_MAX + 10)
+#define CAM_REQ_MGR_RELEASE_BUF                 (CAM_COMMON_OPCODE_MAX + 11)
+#define CAM_REQ_MGR_CACHE_OPS                   (CAM_COMMON_OPCODE_MAX + 12)
+#define CAM_REQ_MGR_GET_MMU_HDLS_DEBUG          (CAM_COMMON_OPCODE_MAX + 13)
+#define CAM_REQ_MGR_GET_IO_BUF_DEBUG            (CAM_COMMON_OPCODE_MAX + 14)
+#define CAM_REQ_MGR_GET_KMD_BUF_DEBUG           (CAM_COMMON_OPCODE_MAX + 15)
+/* end of cam_req_mgr opcodes */
+
+#define CAM_MEM_FLAG_HW_READ_WRITE              (1<<0)
+#define CAM_MEM_FLAG_HW_READ_ONLY               (1<<1)
+#define CAM_MEM_FLAG_HW_WRITE_ONLY              (1<<2)
+#define CAM_MEM_FLAG_KMD_ACCESS                 (1<<3)
+#define CAM_MEM_FLAG_UMD_ACCESS                 (1<<4)
+#define CAM_MEM_FLAG_PROTECTED_MODE             (1<<5)
+#define CAM_MEM_FLAG_CMD_BUF_TYPE               (1<<6)
+#define CAM_MEM_FLAG_PIXEL_BUF_TYPE             (1<<7)
+#define CAM_MEM_FLAG_STATS_BUF_TYPE             (1<<8)
+#define CAM_MEM_FLAG_PACKET_BUF_TYPE            (1<<9)
+#define CAM_MEM_FLAG_CACHE                      (1<<10)
+
+#define CAM_MEM_MMU_MAX_HANDLE                  16
+
+/* Maximum allowed buffers in existence */
+#define CAM_MEM_BUFQ_MAX                        1024
+
+#define CAM_MEM_MGR_HDL_IDX_SIZE                16
+#define CAM_MEM_MGR_HDL_FD_SIZE                 16
+#define CAM_MEM_MGR_HDL_IDX_END_POS             16
+#define CAM_MEM_MGR_HDL_FD_END_POS              32
+
+#define CAM_MEM_MGR_HDL_IDX_MASK      ((1 << CAM_MEM_MGR_HDL_IDX_SIZE) - 1)
+
+#define GET_MEM_HANDLE(idx, fd) \
+	(((idx) << (CAM_MEM_MGR_HDL_IDX_END_POS - CAM_MEM_MGR_HDL_IDX_SIZE)) | \
+	((fd) << (CAM_MEM_MGR_HDL_FD_END_POS - CAM_MEM_MGR_HDL_FD_SIZE)))
+
+#define CAM_MEM_MGR_GET_HDL_IDX(hdl) (hdl & CAM_MEM_MGR_HDL_IDX_MASK)
+
+/**
+ * memory allocation type
+ */
+#define CAM_MEM_DMA_NONE                        0
+#define CAM_MEM_DMA_BIDIRECTIONAL               1
+#define CAM_MEM_DMA_TO_DEVICE                   2
+#define CAM_MEM_DMA_FROM_DEVICE                 3
+
+
+/**
+ * memory cache operation
+ */
+#define CAM_MEM_CLEAN_CACHE                     1
+#define CAM_MEM_INV_CACHE                       2
+#define CAM_MEM_CLEAN_INV_CACHE                 3
+
+
+/**
+ * struct cam_mem_alloc_out_params
+ * @buf_handle: buffer handle
+ * @fd: output buffer file descriptor
+ * @vaddr: virtual address pointer
+ */
+struct cam_mem_alloc_out_params {
+	uint32_t buf_handle;
+	int32_t fd;
+	uint64_t vaddr;
+};
+
+/**
+ * struct cam_mem_map_out_params
+ * @buf_handle: buffer handle
+ * @reserved: reserved for future
+ * @vaddr: virtual address pointer
+ */
+struct cam_mem_map_out_params {
+	uint32_t buf_handle;
+	uint32_t reserved;
+	uint64_t vaddr;
+};
+
+/**
+ * struct cam_mem_mgr_alloc_cmd
+ * @len: size of buffer to allocate
+ * @align: alignment of the buffer
+ * @mmu_hdls: array of mmu handles
+ * @num_hdl: number of handles
+ * @flags: flags of the buffer
+ * @out: out params
+ */
+/* CAM_REQ_MGR_ALLOC_BUF */
+struct cam_mem_mgr_alloc_cmd {
+	uint64_t len;
+	uint64_t align;
+	int32_t mmu_hdls[CAM_MEM_MMU_MAX_HANDLE];
+	uint32_t num_hdl;
+	uint32_t flags;
+	struct cam_mem_alloc_out_params out;
+};
+
+/**
+ * struct cam_mem_mgr_map_cmd
+ * @mmu_hdls: array of mmu handles
+ * @num_hdl: number of handles
+ * @flags: flags of the buffer
+ * @fd: output buffer file descriptor
+ * @reserved: reserved field
+ * @out: out params
+ */
+
+/* CAM_REQ_MGR_MAP_BUF */
+struct cam_mem_mgr_map_cmd {
+	int32_t mmu_hdls[CAM_MEM_MMU_MAX_HANDLE];
+	uint32_t num_hdl;
+	uint32_t flags;
+	int32_t fd;
+	uint32_t reserved;
+	struct cam_mem_map_out_params out;
+};
+
+/**
+ * struct cam_mem_mgr_release_cmd
+ * @buf_handle: buffer handle
+ * @reserved: reserved field
+ */
+/* CAM_REQ_MGR_RELEASE_BUF */
+struct cam_mem_mgr_release_cmd {
+	int32_t buf_handle;
+	uint32_t reserved;
+};
+
+/**
+ * struct cam_mem_cache_ops_cmd
+ * @buf_handle: buffer handle
+ * @mem_cache_ops: cache operation to perform
+ */
+/* CAM_REQ_MGR_CACHE_OPS */
+struct cam_mem_cache_ops_cmd {
+	int32_t buf_handle;
+	uint32_t mem_cache_ops;
+};
+
+#endif /* __UAPI_LINUX_CAM_REQ_MGR_H */
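
A sketch of a round trip through the handle-packing macros above (the index
and fd values here are arbitrary). With CAM_MEM_MGR_HDL_IDX_SIZE and
CAM_MEM_MGR_HDL_FD_SIZE both 16, the handle carries the table index in bits
0-15 and the buffer fd in bits 16-31:

	uint32_t hdl = GET_MEM_HANDLE(5, 42);	/* idx 5 -> bits 0-15, fd 42 -> bits 16-31 */
	uint32_t idx = CAM_MEM_MGR_GET_HDL_IDX(hdl);	/* yields 5 */
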
diff --git a/sound/soc/codecs/Makefile b/sound/soc/codecs/Makefile
index 648bd2b..78db388 100644
--- a/sound/soc/codecs/Makefile
+++ b/sound/soc/codecs/Makefile
@@ -163,7 +163,7 @@
 snd-soc-wcd9335-objs := wcd9335.o
 snd-soc-wcd934x-objs := wcd934x.o
 snd-soc-wcd9xxx-objs := wcd9xxx-resmgr.o wcd9xxx-mbhc.o wcd9xxx-common.o wcdcal-hwdep.o
-snd-soc-wcd9xxx-v2-objs := wcd9xxx-common-v2.o wcd9xxx-resmgr-v2.o
+snd-soc-wcd9xxx-v2-objs := wcd9xxx-common-v2.o wcd9xxx-resmgr-v2.o wcdcal-hwdep.o
 ifeq ($(CONFIG_COMMON_CLK_MSM), y)
 	audio-ext-clock-objs := audio-ext-clk.o
 endif
diff --git a/sound/soc/codecs/audio-ext-clk-up.c b/sound/soc/codecs/audio-ext-clk-up.c
index 498ef7b..626db69 100644
--- a/sound/soc/codecs/audio-ext-clk-up.c
+++ b/sound/soc/codecs/audio-ext-clk-up.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -21,7 +21,7 @@
 #include <linux/platform_device.h>
 #include <linux/gpio.h>
 #include <linux/of_gpio.h>
-#include <dt-bindings/clock/audio-ext-clk.h>
+#include <dt-bindings/clock/qcom,audio-ext-clk.h>
 #include <sound/q6afe-v2.h>
 
 enum audio_clk_mux {
diff --git a/sound/soc/codecs/msm8x16/msm8x16-wcd.c b/sound/soc/codecs/msm8x16/msm8x16-wcd.c
index bb84725..f76dde7 100644
--- a/sound/soc/codecs/msm8x16/msm8x16-wcd.c
+++ b/sound/soc/codecs/msm8x16/msm8x16-wcd.c
@@ -1487,7 +1487,7 @@
 		return NULL;
 
 	static_cnt = of_property_count_strings(dev->of_node, static_prop_name);
-	if (IS_ERR_VALUE(static_cnt)) {
+	if (static_cnt < 0) {
 		dev_err(dev, "%s: Failed to get static supplies %d\n", __func__,
 			static_cnt);
 		ret = -EINVAL;
@@ -1496,7 +1496,7 @@
 
 	/* On-demand supply list is an optional property */
 	ond_cnt = of_property_count_strings(dev->of_node, ond_prop_name);
-	if (IS_ERR_VALUE(ond_cnt))
+	if (ond_cnt < 0)
 		ond_cnt = 0;
 
 	WARN_ON(static_cnt <= 0 || ond_cnt < 0);
diff --git a/sound/soc/codecs/msm_hdmi_codec_rx.c b/sound/soc/codecs/msm_hdmi_codec_rx.c
index fca4168..cd5e707 100644
--- a/sound/soc/codecs/msm_hdmi_codec_rx.c
+++ b/sound/soc/codecs/msm_hdmi_codec_rx.c
@@ -56,8 +56,7 @@
 
 	rc = codec_data->ext_disp_ops.get_audio_edid_blk(
 				codec_data->ext_disp_core_pdev, &edid_blk);
-
-	if (!IS_ERR_VALUE(rc)) {
+	if (rc >= 0) {
 		uinfo->type = SNDRV_CTL_ELEM_TYPE_BYTES;
 		uinfo->count = edid_blk.audio_data_blk_size +
 			edid_blk.spk_alloc_data_blk_size;
@@ -84,7 +83,7 @@
 
 	rc = codec_data->ext_disp_ops.get_audio_edid_blk(
 			codec_data->ext_disp_core_pdev, &edid_blk);
-	if (!IS_ERR_VALUE(rc)) {
+	if (rc >= 0) {
 		memcpy(ucontrol->value.bytes.data,
 		       edid_blk.audio_data_blk,
 		       edid_blk.audio_data_blk_size);
@@ -121,7 +120,7 @@
 
 	cable_state = codec_data->ext_disp_ops.cable_status(
 				   codec_data->ext_disp_core_pdev, 1);
-	if (IS_ERR_VALUE(cable_state)) {
+	if (cable_state < 0) {
 		dev_err(codec->dev, "%s: Error retrieving cable state from ext_disp, err:%d\n",
 			__func__, cable_state);
 		rc = cable_state;
@@ -139,7 +138,7 @@
 
 	disp_type = codec_data->ext_disp_ops.get_intf_id(
 						codec_data->ext_disp_core_pdev);
-	if (!IS_ERR_VALUE(disp_type)) {
+	if (disp_type >= 0) {
 		switch (disp_type) {
 		case EXT_DISPLAY_TYPE_DP:
 			ucontrol->value.integer.value[0] = 2;
@@ -205,7 +204,7 @@
 	codec_data->cable_status =
 		codec_data->ext_disp_ops.cable_status(
 		codec_data->ext_disp_core_pdev, 1);
-	if (IS_ERR_VALUE(codec_data->cable_status)) {
+	if (codec_data->cable_status < 0) {
 		dev_err(dai->dev,
 			"%s() ext disp core is not ready (ret val = %d)\n",
 			__func__, codec_data->cable_status);
@@ -241,7 +240,7 @@
 		return -EINVAL;
 	}
 
-	if (IS_ERR_VALUE(codec_data->cable_status)) {
+	if (codec_data->cable_status < 0) {
 		dev_err_ratelimited(dai->dev,
 			"%s() ext disp core is not ready (ret val = %d)\n",
 			__func__, codec_data->cable_status);
@@ -300,7 +299,7 @@
 
 	rc = codec_data->ext_disp_ops.audio_info_setup(
 			codec_data->ext_disp_core_pdev, &audio_setup_params);
-	if (IS_ERR_VALUE(rc)) {
+	if (rc < 0) {
 		dev_err_ratelimited(dai->dev,
 			"%s() ext disp core is not ready, rc: %d\n",
 			__func__, rc);
@@ -327,7 +326,7 @@
 
 	rc = codec_data->ext_disp_ops.cable_status(
 			codec_data->ext_disp_core_pdev, 0);
-	if (IS_ERR_VALUE(rc)) {
+	if (rc < 0) {
 		dev_err(dai->dev,
 			"%s: ext disp core had problems releasing audio flag\n",
 			__func__);
@@ -432,8 +431,10 @@
 static struct snd_soc_codec_driver msm_ext_disp_audio_codec_rx_soc_driver = {
 	.probe = msm_ext_disp_audio_codec_rx_probe,
 	.remove =  msm_ext_disp_audio_codec_rx_remove,
-	.controls = msm_ext_disp_codec_rx_controls,
-	.num_controls = ARRAY_SIZE(msm_ext_disp_codec_rx_controls),
+	.component_driver = {
+		.controls = msm_ext_disp_codec_rx_controls,
+		.num_controls = ARRAY_SIZE(msm_ext_disp_codec_rx_controls),
+	},
 };
 
 static int msm_ext_disp_audio_codec_rx_plat_probe(
diff --git a/sound/soc/codecs/wcd-dsp-mgr.c b/sound/soc/codecs/wcd-dsp-mgr.c
index f51301d1..5c27f10 100644
--- a/sound/soc/codecs/wcd-dsp-mgr.c
+++ b/sound/soc/codecs/wcd-dsp-mgr.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -362,7 +362,7 @@
 	ret = wdsp_unicast_event(wdsp, WDSP_CMPNT_TRANSPORT,
 				 WDSP_EVENT_DLOAD_SECTION,
 				 &img_section);
-	if (IS_ERR_VALUE(ret))
+	if (ret < 0)
 		WDSP_ERR(wdsp,
 			 "Failed, err = %d for base_addr = 0x%x split_fname = %s, load_addr = 0x%x, size = 0x%zx",
 			 ret, wdsp->base_addr, seg->split_fname,
@@ -396,7 +396,7 @@
 
 	ret = wdsp_get_segment_list(ctl->cdev, wdsp->img_fname,
 				    type, wdsp->seg_list, &wdsp->base_addr);
-	if (IS_ERR_VALUE(ret) ||
+	if (ret < 0 ||
 	    list_empty(wdsp->seg_list)) {
 		WDSP_ERR(wdsp, "Error %d to get image segments for type %d",
 			 ret, type);
@@ -411,7 +411,7 @@
 	/* Go through the list of segments and download one by one */
 	list_for_each_entry(seg, wdsp->seg_list, list) {
 		ret = wdsp_load_each_segment(wdsp, seg);
-		if (IS_ERR_VALUE(ret)) {
+		if (ret < 0) {
 			wdsp_broadcast_event_downseq(wdsp,
 						     WDSP_EVENT_DLOAD_FAILED,
 						     NULL);
@@ -440,7 +440,7 @@
 	if (!is_initialized) {
 		/* Components are not initialized yet, initialize them */
 		ret = wdsp_init_components(wdsp);
-		if (IS_ERR_VALUE(ret)) {
+		if (ret < 0) {
 			WDSP_ERR(wdsp, "INIT failed, err = %d", ret);
 			goto done;
 		}
@@ -449,7 +449,7 @@
 
 	/* Download the read-execute sections of image */
 	ret = wdsp_download_segments(wdsp, WDSP_ELF_FLAG_RE);
-	if (IS_ERR_VALUE(ret)) {
+	if (ret < 0) {
 		WDSP_ERR(wdsp, "Error %d to download code sections", ret);
 		goto done;
 	}
@@ -469,7 +469,7 @@
 	}
 
 	ret = wdsp_init_and_dload_code_sections(wdsp);
-	if (IS_ERR_VALUE(ret))
+	if (ret < 0)
 		WDSP_ERR(wdsp, "dload code sections failed, err = %d", ret);
 }
 
@@ -486,7 +486,7 @@
 
 	/* Download the read-write sections of image */
 	ret = wdsp_download_segments(wdsp, WDSP_ELF_FLAG_WRITE);
-	if (IS_ERR_VALUE(ret)) {
+	if (ret < 0) {
 		WDSP_ERR(wdsp, "Data section download failed, err = %d", ret);
 		goto done;
 	}
@@ -495,7 +495,7 @@
 
 	ret = wdsp_unicast_event(wdsp, WDSP_CMPNT_CONTROL,
 				 WDSP_EVENT_DO_BOOT, NULL);
-	if (IS_ERR_VALUE(ret)) {
+	if (ret < 0) {
 		WDSP_ERR(wdsp, "Failed to boot dsp, err = %d", ret);
 		WDSP_CLEAR_STATUS(wdsp, WDSP_STATUS_DATA_DLOADED);
 		goto done;
@@ -536,7 +536,7 @@
 	wdsp_broadcast_event_downseq(wdsp, WDSP_EVENT_PRE_SHUTDOWN, NULL);
 	ret = wdsp_unicast_event(wdsp, WDSP_CMPNT_CONTROL,
 				 WDSP_EVENT_DO_SHUTDOWN, NULL);
-	if (IS_ERR_VALUE(ret)) {
+	if (ret < 0) {
 		WDSP_ERR(wdsp, "Failed to shutdown dsp, err = %d", ret);
 		goto done;
 	}
@@ -650,7 +650,7 @@
 	ret = wdsp_unicast_event(wdsp, WDSP_CMPNT_TRANSPORT,
 				 WDSP_EVENT_READ_SECTION,
 				 &img_section);
-	if (IS_ERR_VALUE(ret)) {
+	if (ret < 0) {
 		WDSP_ERR(wdsp, "Failed to read dumps, size 0x%zx at addr 0x%x",
 			 img_section.size, img_section.addr);
 		goto err_read_dumps;
@@ -661,7 +661,7 @@
 	rd_seg.v_address = wdsp->dump_data.rd_v_addr;
 
 	ret = do_ramdump(wdsp->dump_data.rd_dev, &rd_seg, 1);
-	if (IS_ERR_VALUE(ret))
+	if (ret < 0)
 		WDSP_ERR(wdsp, "do_ramdump failed with error %d", ret);
 
 err_read_dumps:
@@ -689,7 +689,7 @@
 		wdsp_collect_ramdumps(wdsp);
 		ret = wdsp_unicast_event(wdsp, WDSP_CMPNT_CONTROL,
 					 WDSP_EVENT_DO_SHUTDOWN, NULL);
-		if (IS_ERR_VALUE(ret))
+		if (ret < 0)
 			WDSP_ERR(wdsp, "Failed WDSP shutdown, err = %d", ret);
 
 		wdsp_broadcast_event_downseq(wdsp, WDSP_EVENT_POST_SHUTDOWN,
@@ -724,7 +724,7 @@
 	}
 
 	ret = wdsp_init_and_dload_code_sections(wdsp);
-	if (IS_ERR_VALUE(ret)) {
+	if (ret < 0) {
 		WDSP_ERR(wdsp, "Failed to dload code sections err = %d",
 			 ret);
 		goto done;
@@ -834,7 +834,7 @@
 		break;
 	}
 
-	if (IS_ERR_VALUE(ret))
+	if (ret < 0)
 		WDSP_ERR(wdsp, "handling signal %d failed with error %d",
 			 signal, ret);
 	WDSP_MGR_MUTEX_UNLOCK(wdsp, wdsp->api_mutex);
@@ -870,7 +870,7 @@
 			ret = wdsp_disable_dsp(wdsp);
 	}
 
-	if (IS_ERR_VALUE(ret))
+	if (ret < 0)
 		WDSP_DBG(wdsp, "wdsp %s failed, err = %d",
 			 vote ? "enable" : "disable", ret);
 
@@ -925,7 +925,7 @@
 		dev_info(dev, "%s: create_ramdump_device failed\n", __func__);
 
 	ret = component_bind_all(dev, wdsp->ops);
-	if (IS_ERR_VALUE(ret))
+	if (ret < 0)
 		WDSP_ERR(wdsp, "component_bind_all failed %d\n", ret);
 
 	/* Make sure all components registered ops */
@@ -1027,7 +1027,7 @@
 
 	ret = of_property_read_string(dev->of_node, "qcom,img-filename",
 				      &wdsp->img_fname);
-	if (IS_ERR_VALUE(ret)) {
+	if (ret < 0) {
 		WDSP_ERR(wdsp, "Reading property %s failed, error = %d",
 			 "qcom,img-filename", ret);
 		return ret;
@@ -1100,7 +1100,7 @@
 
 	ret = component_master_add_with_match(mdev, &wdsp_master_ops,
 					      wdsp->match);
-	if (IS_ERR_VALUE(ret)) {
+	if (ret < 0) {
 		WDSP_ERR(wdsp, "Failed to add master, err = %d", ret);
 		goto err_master_add;
 	}
diff --git a/sound/soc/codecs/wcd-dsp-utils.c b/sound/soc/codecs/wcd-dsp-utils.c
index 1f04891..4eafd55 100644
--- a/sound/soc/codecs/wcd-dsp-utils.c
+++ b/sound/soc/codecs/wcd-dsp-utils.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -76,7 +76,7 @@
 	snprintf(seg->split_fname, sizeof(seg->split_fname),
 		 "%s.b%02d", img_fname, phdr_idx);
 	ret = request_firmware(&seg->split_fw, seg->split_fname, dev);
-	if (IS_ERR_VALUE(ret)) {
+	if (ret < 0) {
 		dev_err(dev, "%s: firmware %s not found\n",
 			__func__, seg->split_fname);
 		goto bad_seg;
@@ -162,7 +162,7 @@
 
 	snprintf(mdt_name, sizeof(mdt_name), "%s.mdt", img_fname);
 	ret = request_firmware(&fw, mdt_name, dev);
-	if (IS_ERR_VALUE(ret)) {
+	if (ret < 0) {
 		dev_err(dev, "%s: firmware %s not found\n",
 			__func__, mdt_name);
 		goto done;
@@ -205,7 +205,7 @@
 		if (segment_match) {
 			ret = wdsp_add_segment_to_list(dev, img_fname, phdr,
 						       phdr_idx, seg_list);
-			if (IS_ERR_VALUE(ret)) {
+			if (ret < 0) {
 				wdsp_flush_segment_list(seg_list);
 				goto bad_elf;
 			}
diff --git a/sound/soc/codecs/wcd-mbhc-v2.c b/sound/soc/codecs/wcd-mbhc-v2.c
index cc54362..f4c68ff 100644
--- a/sound/soc/codecs/wcd-mbhc-v2.c
+++ b/sound/soc/codecs/wcd-mbhc-v2.c
@@ -2455,9 +2455,6 @@
 			return ret;
 		}
 
-		set_bit(INPUT_PROP_NO_DUMMY_RELEASE,
-			mbhc->button_jack.jack->input_dev->propbit);
-
 		INIT_DELAYED_WORK(&mbhc->mbhc_firmware_dwork,
 				  wcd_mbhc_fw_read);
 		INIT_DELAYED_WORK(&mbhc->mbhc_btn_dwork, wcd_btn_lpress_fn);
diff --git a/sound/soc/codecs/wcd-spi.c b/sound/soc/codecs/wcd-spi.c
index b3fbc89..c08e746 100644
--- a/sound/soc/codecs/wcd-spi.c
+++ b/sound/soc/codecs/wcd-spi.c
@@ -314,7 +314,7 @@
 	xfer->len = xfer_len;
 
 	ret = spi_sync(spi, &wcd_spi->msg1);
-	if (IS_ERR_VALUE(ret))
+	if (ret < 0)
 		dev_err(&spi->dev,
 			"%s: Failed, addr = 0x%x, len = %zd\n",
 			__func__, remote_addr, len);
@@ -342,7 +342,7 @@
 		else
 			ret = wcd_spi_read_single(spi, addr,
 						  (u32 *)data);
-		if (IS_ERR_VALUE(ret)) {
+		if (ret < 0) {
 			dev_err(&spi->dev,
 				"%s: %s fail iter(%d) start-word addr (0x%x)\n",
 				__func__, wcd_spi_xfer_req_str(xfer_req),
@@ -365,7 +365,7 @@
 		else
 			ret = wcd_spi_read_multi(spi, addr, data,
 						 WCD_SPI_RW_MULTI_MAX_LEN);
-		if (IS_ERR_VALUE(ret)) {
+		if (ret < 0) {
 			dev_err(&spi->dev,
 				"%s: %s fail iter(%d) max-write addr (0x%x)\n",
 				__func__, wcd_spi_xfer_req_str(xfer_req),
@@ -390,7 +390,7 @@
 			ret = wcd_spi_write_multi(spi, addr, data, to_xfer);
 		else
 			ret = wcd_spi_read_multi(spi, addr, data, to_xfer);
-		if (IS_ERR_VALUE(ret)) {
+		if (ret < 0) {
 			dev_err(&spi->dev,
 				"%s: %s fail write addr (0x%x), size (0x%x)\n",
 				__func__, wcd_spi_xfer_req_str(xfer_req),
@@ -410,7 +410,7 @@
 			ret = wcd_spi_write_single(spi, addr, (*((u32 *)data)));
 		else
 			ret = wcd_spi_read_single(spi, addr,  (u32 *) data);
-		if (IS_ERR_VALUE(ret)) {
+		if (ret < 0) {
 			dev_err(&spi->dev,
 				"%s: %s fail iter(%d) end-write addr (0x%x)\n",
 				__func__, wcd_spi_xfer_req_str(xfer_req),
@@ -479,7 +479,7 @@
 	rx_xfer->len = sizeof(status);
 
 	ret = spi_sync(spi, &wcd_spi->msg2);
-	if (IS_ERR_VALUE(ret)) {
+	if (ret < 0) {
 		dev_err(&spi->dev, "%s: RDSR failed, err = %d\n",
 			__func__, ret);
 		goto done;
@@ -500,21 +500,21 @@
 	u32 rd_status;
 
 	ret = wcd_spi_cmd_nop(spi);
-	if (IS_ERR_VALUE(ret)) {
+	if (ret < 0) {
 		dev_err(&spi->dev, "%s: NOP1 failed, err = %d\n",
 			__func__, ret);
 		goto done;
 	}
 
 	ret = wcd_spi_cmd_clkreq(spi);
-	if (IS_ERR_VALUE(ret)) {
+	if (ret < 0) {
 		dev_err(&spi->dev, "%s: CLK_REQ failed, err = %d\n",
 			__func__, ret);
 		goto done;
 	}
 
 	ret = wcd_spi_cmd_nop(spi);
-	if (IS_ERR_VALUE(ret)) {
+	if (ret < 0) {
 		dev_err(&spi->dev, "%s: NOP2 failed, err = %d\n",
 			__func__, ret);
 		goto done;
@@ -543,7 +543,7 @@
 	int ret;
 
 	ret = wcd_spi_write_single(spi, WCD_SPI_ADDR_IPC_CTL_HOST, 0x01);
-	if (IS_ERR_VALUE(ret))
+	if (ret < 0)
 		dev_err(&spi->dev, "%s: Failed, err = %d\n",
 			__func__, ret);
 	else
@@ -612,7 +612,7 @@
 				msecs_to_jiffies(WCD_SPI_CLK_OFF_TIMER_MS));
 		} else {
 			ret = wcd_spi_clk_disable(spi);
-			if (IS_ERR_VALUE(ret))
+			if (ret < 0)
 				dev_err(&spi->dev,
 					"%s: Failed to disable clk err = %d\n",
 					__func__, ret);
@@ -635,11 +635,11 @@
 
 	ret = wcd_spi_clk_ctrl(spi, WCD_SPI_CLK_ENABLE,
 			       WCD_SPI_CLK_FLAG_IMMEDIATE);
-	if (IS_ERR_VALUE(ret))
+	if (ret < 0)
 		goto done;
 
 	ret = wcd_spi_cmd_wr_en(spi);
-	if (IS_ERR_VALUE(ret))
+	if (ret < 0)
 		goto err_wr_en;
 
 	/*
@@ -677,7 +677,7 @@
 
 	WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);
 	ret = wcd_spi_clk_disable(spi);
-	if (IS_ERR_VALUE(ret))
+	if (ret < 0)
 		dev_err(&spi->dev,
 			"%s: Failed to disable clk, err = %d\n",
 			__func__, ret);
@@ -735,7 +735,7 @@
 	/* Request for clock */
 	ret = wcd_spi_clk_ctrl(spi, WCD_SPI_CLK_ENABLE,
 			       WCD_SPI_CLK_FLAG_IMMEDIATE);
-	if (IS_ERR_VALUE(ret)) {
+	if (ret < 0) {
 		dev_err(&spi->dev, "%s: clk enable failed %d\n",
 			__func__, ret);
 		goto done;
@@ -743,7 +743,7 @@
 
 	/* Perform the transaction */
 	ret = __wcd_spi_data_xfer(spi, msg, req);
-	if (IS_ERR_VALUE(ret))
+	if (ret < 0)
 		dev_err(&spi->dev,
 			"%s: Failed %s, addr = 0x%x, size = 0x%zx, err = %d\n",
 			__func__, wcd_spi_xfer_req_str(req),
@@ -752,7 +752,7 @@
 	/* Release the clock even if xfer failed */
 	ret1 = wcd_spi_clk_ctrl(spi, WCD_SPI_CLK_DISABLE,
 				WCD_SPI_CLK_FLAG_DELAYED);
-	if (IS_ERR_VALUE(ret1))
+	if (ret1 < 0)
 		dev_err(&spi->dev, "%s: clk disable failed %d\n",
 			__func__, ret1);
 done:
@@ -823,7 +823,7 @@
 	msg.len = sec->size;
 
 	ret = __wcd_spi_data_xfer(spi, &msg, WCD_SPI_XFER_WRITE);
-	if (IS_ERR_VALUE(ret))
+	if (ret < 0)
 		dev_err(&spi->dev, "%s: fail addr (0x%x) size (0x%zx)\n",
 			__func__, msg.remote_addr, msg.len);
 	return ret;
@@ -844,7 +844,7 @@
 		__func__, msg.remote_addr, msg.len);
 
 	ret = wcd_spi_data_xfer(spi, &msg, WCD_SPI_XFER_READ);
-	if (IS_ERR_VALUE(ret))
+	if (ret < 0)
 		dev_err(&spi->dev, "%s: fail addr (0x%x) size (0x%zx)\n",
 			__func__, msg.remote_addr, msg.len);
 	return ret;
@@ -865,7 +865,7 @@
 	case WDSP_EVENT_PRE_DLOAD_DATA:
 		ret = wcd_spi_clk_ctrl(spi, WCD_SPI_CLK_ENABLE,
 				       WCD_SPI_CLK_FLAG_IMMEDIATE);
-		if (IS_ERR_VALUE(ret))
+		if (ret < 0)
 			dev_err(&spi->dev, "%s: clk_req failed %d\n",
 				__func__, ret);
 		break;
@@ -876,7 +876,7 @@
 
 		ret = wcd_spi_clk_ctrl(spi, WCD_SPI_CLK_DISABLE,
 				       WCD_SPI_CLK_FLAG_IMMEDIATE);
-		if (IS_ERR_VALUE(ret))
+		if (ret < 0)
 			dev_err(&spi->dev, "%s: clk unvote failed %d\n",
 				__func__, ret);
 		break;
@@ -1054,7 +1054,7 @@
 	msg.flags = 0;
 
 	ret = wcd_spi_data_read(spi, &msg);
-	if (IS_ERR_VALUE(ret)) {
+	if (ret < 0) {
 		dev_err(&spi->dev,
 			"%s: Failed to read %zu bytes from addr 0x%x\n",
 			__func__, buf_size, msg.remote_addr);
@@ -1175,7 +1175,7 @@
 	int ret;
 
 	ret = wcd_spi_init(spi);
-	if (IS_ERR_VALUE(ret))
+	if (ret < 0)
 		dev_err(&spi->dev, "%s: Init failed, err = %d\n",
 			__func__, ret);
 	return ret;
@@ -1281,7 +1281,7 @@
 	ret = of_property_read_u32(spi->dev.of_node,
 				   "qcom,mem-base-addr",
 				   &wcd_spi->mem_base_addr);
-	if (IS_ERR_VALUE(ret)) {
+	if (ret < 0) {
 		dev_err(&spi->dev, "%s: Missing %s DT entry",
 			__func__, "qcom,mem-base-addr");
 		goto err_ret;
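
wcd-spi.c votes for its clock around every transfer: enables are immediate, while releases may be deferred (WCD_SPI_CLK_FLAG_DELAYED) so back-to-back transfers do not bounce the clock. A minimal sketch of that voting pattern under hypothetical names; the 500 ms stands in for WCD_SPI_CLK_OFF_TIMER_MS:

    #include <linux/kernel.h>
    #include <linux/mutex.h>
    #include <linux/workqueue.h>
    #include <linux/jiffies.h>

    struct clk_vote {
        struct mutex lock;
        int users;
        struct delayed_work off_work;
    };

    static void clk_hw_set(bool on)
    {
        /* stand-in for the real clock enable/disable */
    }

    static void clk_off_worker(struct work_struct *work)
    {
        struct clk_vote *v = container_of(to_delayed_work(work),
                                          struct clk_vote, off_work);

        mutex_lock(&v->lock);
        if (v->users == 0)      /* nobody re-voted while we waited */
            clk_hw_set(false);
        mutex_unlock(&v->lock);
    }

    static void clk_vote_get(struct clk_vote *v)
    {
        mutex_lock(&v->lock);
        cancel_delayed_work(&v->off_work);
        if (v->users++ == 0)
            clk_hw_set(true);
        mutex_unlock(&v->lock);
    }

    static void clk_vote_put(struct clk_vote *v, bool delayed)
    {
        mutex_lock(&v->lock);
        if (--v->users == 0) {
            if (delayed)        /* keep the clock up briefly */
                schedule_delayed_work(&v->off_work,
                                      msecs_to_jiffies(500));
            else
                clk_hw_set(false);
        }
        mutex_unlock(&v->lock);
    }
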
diff --git a/sound/soc/codecs/wcd9330.c b/sound/soc/codecs/wcd9330.c
index d5921b7..a53a956 100644
--- a/sound/soc/codecs/wcd9330.c
+++ b/sound/soc/codecs/wcd9330.c
@@ -5765,8 +5765,7 @@
 
 	list_for_each_entry(ch, &cdc_dai->wcd9xxx_ch_list, list) {
 		port = wcd9xxx_get_slave_port(ch->ch_num);
-
-		if (IS_ERR_VALUE(port) ||
+		if (port < 0 ||
 		    !TOMTOM_VALIDATE_RX_SBPORT_RANGE(port)) {
 			dev_warn(codec->dev,
 				 "%s: invalid port ID %d returned for RX DAI\n",
@@ -5825,8 +5824,7 @@
 
 	list_for_each_entry(ch, &cdc_dai->wcd9xxx_ch_list, list) {
 		port = wcd9xxx_get_slave_port(ch->ch_num);
-
-		if (IS_ERR_VALUE(port) ||
+		if (port < 0 ||
 		    !TOMTOM_VALIDATE_TX_SBPORT_RANGE(port)) {
 			dev_warn(codec->dev,
 				 "%s: invalid port ID %d returned for TX DAI\n",
@@ -7300,8 +7298,7 @@
 				      pdata->micbias.cfilt2_mv);
 	k3 = wcd9xxx_resmgr_get_k_val(&tomtom->resmgr,
 				      pdata->micbias.cfilt3_mv);
-
-	if (IS_ERR_VALUE(k1) || IS_ERR_VALUE(k2) || IS_ERR_VALUE(k3)) {
+	if (k1 < 0 || k2 < 0 || k3 < 0) {
 		rc = -EINVAL;
 		goto done;
 	}
@@ -8467,7 +8464,7 @@
 	snd_soc_cache_sync(codec);
 
 	ret = tomtom_handle_pdata(tomtom);
-	if (IS_ERR_VALUE(ret))
+	if (ret < 0)
 		pr_err("%s: bad pdata\n", __func__);
 
 	tomtom_init_slim_slave_cfg(codec);
@@ -8875,7 +8872,7 @@
 	tomtom_codec_init_reg(codec);
 
 	ret = tomtom_handle_pdata(tomtom);
-	if (IS_ERR_VALUE(ret)) {
+	if (ret < 0) {
 		pr_err("%s: bad pdata\n", __func__);
 		goto err_hwdep;
 	}
@@ -8972,7 +8969,7 @@
 	devm_kfree(codec->dev, tomtom);
 	return ret;
 }
-static int tomtom_codec_remove(struct snd_soc_component *component)
+static void tomtom_codec_remove(struct snd_soc_component *component)
 {
 	struct snd_soc_codec *codec = snd_soc_component_to_codec(component);
 	struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec);
@@ -9000,7 +8997,6 @@
 	tomtom->spkdrv2_reg = NULL;
 
 	devm_kfree(codec->dev, tomtom);
-	return 0;
 }
 
 static struct regmap *tomtom_get_regmap(struct device *dev)
@@ -9011,12 +9007,12 @@
 }
 
 static struct snd_soc_codec_driver soc_codec_dev_tomtom = {
-	.controls = tomtom_snd_controls,
-	.num_controls = ARRAY_SIZE(tomtom_snd_controls),
 	.get_regmap = tomtom_get_regmap,
 	.component_driver = {
 		.probe = tomtom_codec_probe,
 		.remove = tomtom_codec_remove,
+		.controls = tomtom_snd_controls,
+		.num_controls = ARRAY_SIZE(tomtom_snd_controls),
 		.dapm_widgets = tomtom_dapm_widgets,
 		.num_dapm_widgets = ARRAY_SIZE(tomtom_dapm_widgets),
 		.dapm_routes = audio_map,
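
The wcd9330.c hunks track two ASoC API moves in this tree: the component-level remove callback returns void, and kcontrols migrate from the codec driver into its embedded component_driver. A skeletal driver showing the destination layout, as this tree defines it; the names and the placeholder control are made up:

    #include <sound/soc.h>

    static int demo_codec_probe(struct snd_soc_component *component)
    {
        return 0;
    }

    static void demo_codec_remove(struct snd_soc_component *component)
    {
        /* returns void: remove is not allowed to fail */
    }

    static const struct snd_kcontrol_new demo_controls[] = {
        SOC_SINGLE("Demo Switch", 0, 0, 1, 0),  /* placeholder */
    };

    static struct snd_soc_codec_driver demo_codec_drv = {
        .component_driver = {
            .probe        = demo_codec_probe,
            .remove       = demo_codec_remove,
            .controls     = demo_controls,
            .num_controls = ARRAY_SIZE(demo_controls),
        },
    };
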
diff --git a/sound/soc/codecs/wcd9335.c b/sound/soc/codecs/wcd9335.c
index f12c602..3da18b1 100644
--- a/sound/soc/codecs/wcd9335.c
+++ b/sound/soc/codecs/wcd9335.c
@@ -1768,7 +1768,7 @@
 	cur_vout_ctl = micb_val & 0x3F;
 
 	req_vout_ctl = wcd9335_get_micb_vout_ctl_val(req_volt);
-	if (IS_ERR_VALUE(req_vout_ctl))
+	if (req_vout_ctl < 0)
 		return -EINVAL;
 	if (cur_vout_ctl == req_vout_ctl)
 		return 0;
@@ -12294,9 +12294,8 @@
 
 	tasha = snd_soc_codec_get_drvdata(codec);
 	card = codec->component.card;
-	tasha->entry = snd_register_module_info(codec_root->module,
-						"tasha",
-						codec_root);
+	tasha->entry = snd_info_create_subdir(codec_root->module,
+					      "tasha", codec_root);
 	if (!tasha->entry) {
 		dev_dbg(codec->dev, "%s: failed to create wcd9335 entry\n",
 			__func__);
@@ -12847,9 +12846,8 @@
 	vout_ctl_2 = wcd9335_get_micb_vout_ctl_val(pdata->micbias.micb2_mv);
 	vout_ctl_3 = wcd9335_get_micb_vout_ctl_val(pdata->micbias.micb3_mv);
 	vout_ctl_4 = wcd9335_get_micb_vout_ctl_val(pdata->micbias.micb4_mv);
-
-	if (IS_ERR_VALUE(vout_ctl_1) || IS_ERR_VALUE(vout_ctl_2) ||
-	    IS_ERR_VALUE(vout_ctl_3) || IS_ERR_VALUE(vout_ctl_4)) {
+	if (vout_ctl_1 < 0 || vout_ctl_2 < 0 ||
+	    vout_ctl_3 < 0 || vout_ctl_4 < 0) {
 		rc = -EINVAL;
 		goto done;
 	}
@@ -13470,7 +13468,7 @@
 
 	pdata = dev_get_platdata(codec->dev->parent);
 	ret = tasha_handle_pdata(tasha, pdata);
-	if (IS_ERR_VALUE(ret))
+	if (ret < 0)
 		dev_err(codec->dev, "%s: invalid pdata\n", __func__);
 
 	/* MBHC Init */
@@ -13577,7 +13575,7 @@
 
 	pdata = dev_get_platdata(codec->dev->parent);
 	ret = tasha_handle_pdata(tasha, pdata);
-	if (IS_ERR_VALUE(ret)) {
+	if (ret < 0) {
 		pr_err("%s: bad pdata\n", __func__);
 		goto err;
 	}
@@ -13742,7 +13740,7 @@
 	return ret;
 }
 
-static int tasha_codec_remove(struct snd_soc_component *component)
+static void tasha_codec_remove(struct snd_soc_component *component)
 {
 	struct snd_soc_codec *codec = snd_soc_component_to_codec(component);
 	struct tasha_priv *tasha = snd_soc_codec_get_drvdata(codec);
@@ -13755,8 +13753,6 @@
 	tasha_cleanup_irqs(tasha);
 	/* Cleanup MBHC */
 	/* Cleanup resmgr */
-
-	return 0;
 }
 
 static struct regmap *tasha_get_regmap(struct device *dev)
@@ -13767,12 +13763,12 @@
 }
 
 static struct snd_soc_codec_driver soc_codec_dev_tasha = {
-	.controls = tasha_snd_controls,
-	.num_controls = ARRAY_SIZE(tasha_snd_controls),
 	.get_regmap = tasha_get_regmap,
 	.component_driver = {
 		.probe = tasha_codec_probe,
 		.remove = tasha_codec_remove,
+		.controls = tasha_snd_controls,
+		.num_controls = ARRAY_SIZE(tasha_snd_controls),
 		.dapm_widgets = tasha_dapm_widgets,
 		.num_dapm_widgets = ARRAY_SIZE(tasha_dapm_widgets),
 		.dapm_routes = audio_map,
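
snd_register_module_info() no longer exists in this kernel; codec proc directories are created with snd_info_create_subdir(), which hands back a plain pointer and NULL on failure (not an ERR_PTR), hence the !entry checks above. A minimal sketch:

    #include <linux/module.h>
    #include <sound/info.h>

    static struct snd_info_entry *demo_make_codec_dir(struct module *mod,
                                                      struct snd_info_entry *root)
    {
        struct snd_info_entry *dir;

        dir = snd_info_create_subdir(mod, "tasha", root);
        if (!dir)       /* NULL on failure, never an ERR_PTR */
            return NULL;
        return dir;
    }
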
diff --git a/sound/soc/codecs/wcd934x/wcd934x-dsp-cntl.c b/sound/soc/codecs/wcd934x/wcd934x-dsp-cntl.c
index 6a5da91..9bc5d5f 100644
--- a/sound/soc/codecs/wcd934x/wcd934x-dsp-cntl.c
+++ b/sound/soc/codecs/wcd934x/wcd934x-dsp-cntl.c
@@ -110,7 +110,7 @@
 	else
 		ret = -EINVAL;
 
-	if (IS_ERR_VALUE(ret))
+	if (ret < 0)
 		dev_err(cntl->codec->dev,
 			"%s: failed to %s dsp\n", __func__,
 			vote ? "enable" : "disable");
@@ -356,7 +356,7 @@
 
 	if (enable) {
 		ret = wcd_cntl_cpe_fll_calibrate(cntl);
-		if (IS_ERR_VALUE(ret)) {
+		if (ret < 0) {
 			dev_err(codec->dev,
 				"%s: cpe_fll_cal failed, err = %d\n",
 				__func__, ret);
@@ -395,7 +395,7 @@
 	else
 		ret = -EINVAL;
 
-	if (IS_ERR_VALUE(ret)) {
+	if (ret < 0) {
 		dev_err(codec->dev,
 			"%s: Failed to enable cdc clk, err = %d\n",
 			__func__, ret);
@@ -404,7 +404,7 @@
 
 	/* Configure and Enable CPE FLL clock */
 	ret = wcd_cntl_cpe_fll_ctrl(cntl, true);
-	if (IS_ERR_VALUE(ret)) {
+	if (ret < 0) {
 		dev_err(codec->dev,
 			"%s: Failed to enable cpe clk, err = %d\n",
 			__func__, ret);
@@ -443,7 +443,7 @@
 
 	/* Disable CPE FLL clock */
 	ret = wcd_cntl_cpe_fll_ctrl(cntl, false);
-	if (IS_ERR_VALUE(ret))
+	if (ret < 0)
 		dev_err(codec->dev,
 			"%s: Failed to disable cpe clk, err = %d\n",
 			__func__, ret);
@@ -705,7 +705,7 @@
 	else
 		ret = -EINVAL;
 
-	if (IS_ERR_VALUE(ret))
+	if (ret < 0)
 		dev_err(cntl->codec->dev,
 			"%s: Failed to handle irq %d\n", __func__, irq);
 
@@ -737,7 +737,7 @@
 		arg.dump_size = WCD_934X_RAMDUMP_SIZE;
 		ret = cntl->m_ops->signal_handler(cntl->m_dev, WDSP_ERR_INTR,
 						  &arg);
-		if (IS_ERR_VALUE(ret))
+		if (ret < 0)
 			dev_err(cntl->codec->dev,
 				"%s: Failed to handle fatal irq 0x%x\n",
 				__func__, status & cntl->irqs.fatal_irqs);
@@ -771,7 +771,7 @@
 		wcd_cntl_cpar_ctrl(cntl, false);
 		/* Disable all the clocks */
 		ret = wcd_cntl_clocks_disable(cntl);
-		if (IS_ERR_VALUE(ret))
+		if (ret < 0)
 			dev_err(codec->dev,
 				"%s: Failed to disable clocks, err = %d\n",
 				__func__, ret);
@@ -782,7 +782,7 @@
 
 		/* Enable all the clocks */
 		ret = wcd_cntl_clocks_enable(cntl);
-		if (IS_ERR_VALUE(ret)) {
+		if (ret < 0) {
 			dev_err(codec->dev,
 				"%s: Failed to enable clocks, err = %d\n",
 				__func__, ret);
@@ -801,7 +801,7 @@
 	case WDSP_EVENT_DO_BOOT:
 
 		ret = wcd_cntl_do_boot(cntl);
-		if (IS_ERR_VALUE(ret))
+		if (ret < 0)
 			dev_err(codec->dev,
 				"%s: WDSP boot failed, err = %d\n",
 				__func__, ret);
@@ -829,7 +829,7 @@
 
 	ret = kobject_init_and_add(&cntl->wcd_kobj, &wcd_cntl_ktype,
 				   kernel_kobj, dir);
-	if (IS_ERR_VALUE(ret)) {
+	if (ret < 0) {
 		dev_err(codec->dev,
 			"%s: Failed to add kobject %s, err = %d\n",
 			__func__, dir, ret);
@@ -837,7 +837,7 @@
 	}
 
 	ret = sysfs_create_file(&cntl->wcd_kobj, &cntl_attr_boot.attr);
-	if (IS_ERR_VALUE(ret)) {
+	if (ret < 0) {
 		dev_err(codec->dev,
 			"%s: Failed to add wdsp_boot sysfs entry to %s\n",
 			__func__, dir);
@@ -919,7 +919,7 @@
 	}
 
 	ret = copy_from_user(val, ubuf, count);
-	if (IS_ERR_VALUE(ret)) {
+	if (ret) {
 		dev_err(cntl->codec->dev,
 			"%s: copy_from_user failed, err = %d\n",
 			__func__, ret);
@@ -998,7 +998,7 @@
 				  cntl->irqs.cpe_ipc1_irq,
 				  wcd_cntl_ipc_irq, "CPE IPC1",
 				  cntl);
-	if (IS_ERR_VALUE(ret)) {
+	if (ret < 0) {
 		dev_err(codec->dev,
 			"%s: Failed to request cpe ipc irq, err = %d\n",
 			__func__, ret);
@@ -1026,7 +1026,7 @@
 
 	/* Enable all the clocks */
 	ret = wcd_cntl_clocks_enable(cntl);
-	if (IS_ERR_VALUE(ret)) {
+	if (ret < 0) {
 		dev_err(codec->dev, "%s: Failed to enable clocks, err = %d\n",
 			__func__, ret);
 		goto err_clk_enable;
@@ -1118,7 +1118,7 @@
 	}
 
 	ret = wcd_cntl_miscdev_create(cntl);
-	if (IS_ERR_VALUE(ret)) {
+	if (ret < 0) {
 		dev_err(dev, "%s: misc dev register failed, err = %d\n",
 			__func__, ret);
 		goto done;
@@ -1127,7 +1127,7 @@
 	snprintf(wcd_cntl_dir_name, WCD_CNTL_DIR_NAME_LEN_MAX,
 		 "%s%d", "wdsp", cntl->dsp_instance);
 	ret = wcd_cntl_sysfs_init(wcd_cntl_dir_name, cntl);
-	if (IS_ERR_VALUE(ret)) {
+	if (ret < 0) {
 		dev_err(dev, "%s: sysfs_init failed, err = %d\n",
 			__func__, ret);
 		goto err_sysfs_init;
@@ -1154,7 +1154,7 @@
 	entry->c.ops = &wdsp_ssr_entry_ops;
 	entry->private_data = cntl;
 	ret = snd_info_register(entry);
-	if (IS_ERR_VALUE(ret)) {
+	if (ret < 0) {
 		dev_err(dev, "%s: Failed to register entry %s, err = %d\n",
 			__func__, proc_name, ret);
 		snd_info_free_entry(entry);
@@ -1233,7 +1233,7 @@
 		ret = cntl->m_ops->signal_handler(cntl->m_dev,
 						  WDSP_CDC_DOWN_SIGNAL,
 						  NULL);
-		if (IS_ERR_VALUE(ret))
+		if (ret < 0)
 			dev_err(cntl->codec->dev,
 				"%s: WDSP_CDC_DOWN_SIGNAL failed, err = %d\n",
 				__func__, ret);
@@ -1243,7 +1243,7 @@
 		ret = cntl->m_ops->signal_handler(cntl->m_dev,
 						  WDSP_CDC_UP_SIGNAL,
 						  NULL);
-		if (IS_ERR_VALUE(ret))
+		if (ret < 0)
 			dev_err(cntl->codec->dev,
 				"%s: WDSP_CDC_UP_SIGNAL failed, err = %d\n",
 				__func__, ret);
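
One conversion in wcd934x-dsp-cntl.c is a special case: copy_from_user() returns the number of bytes it could not copy — an unsigned count, never a negative errno — so the check above tests for any nonzero remainder rather than ret < 0. The conventional shape of that pattern, as a hedged sketch:

    #include <linux/types.h>
    #include <linux/errno.h>
    #include <linux/uaccess.h>

    static ssize_t demo_write(const char __user *ubuf, size_t count)
    {
        char val[16];

        if (count >= sizeof(val))
            return -EINVAL;
        if (copy_from_user(val, ubuf, count))
            return -EFAULT; /* nonzero means bytes were left uncopied */
        val[count] = '\0';
        return count;
    }
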
diff --git a/sound/soc/codecs/wcd934x/wcd934x.c b/sound/soc/codecs/wcd934x/wcd934x.c
index 37c1593..d30ba9c 100644
--- a/sound/soc/codecs/wcd934x/wcd934x.c
+++ b/sound/soc/codecs/wcd934x/wcd934x.c
@@ -1044,7 +1044,7 @@
 		} else {
 			filename = "WCD934X/WCD934X_anc.bin";
 			ret = request_firmware(&fw, filename, codec->dev);
-			if (IS_ERR_VALUE(ret)) {
+			if (ret < 0) {
 				dev_err(codec->dev, "%s: Failed to acquire ANC data: %d\n",
 					__func__, ret);
 				return ret;
@@ -2564,7 +2564,7 @@
 		snd_soc_update_bits(codec, WCD934X_SOC_MAD_AUDIO_CTL_2,
 				    0x03, 0x03);
 		rc = tavil_codec_config_mad(codec);
-		if (IS_ERR_VALUE(rc)) {
+		if (rc < 0) {
 			snd_soc_update_bits(codec, WCD934X_SOC_MAD_AUDIO_CTL_2,
 					    0x03, 0x00);
 			goto done;
@@ -2629,7 +2629,7 @@
 
 		snd_soc_update_bits(codec, WCD934X_CPE_SS_SVA_CFG, 0x20, 0x20);
 		rc = __tavil_codec_enable_mad(codec, true);
-		if (IS_ERR_VALUE(rc)) {
+		if (rc < 0) {
 			tavil->mad_switch_cnt--;
 			goto done;
 		}
@@ -4136,7 +4136,7 @@
 	cur_vout_ctl = micb_val & 0x3F;
 
 	req_vout_ctl = wcd934x_get_micb_vout_ctl_val(req_volt);
-	if (IS_ERR_VALUE(req_vout_ctl)) {
+	if (req_vout_ctl < 0) {
 		ret = -EINVAL;
 		goto exit;
 	}
@@ -8021,9 +8021,8 @@
 
 	tavil = snd_soc_codec_get_drvdata(codec);
 	card = codec->component.card;
-	tavil->entry = snd_register_module_info(codec_root->module,
-						"tavil",
-						codec_root);
+	tavil->entry = snd_info_create_subdir(codec_root->module,
+					      "tavil", codec_root);
 	if (!tavil->entry) {
 		dev_dbg(codec->dev, "%s: failed to create wcd934x entry\n",
 			__func__);
@@ -8539,9 +8538,8 @@
 	vout_ctl_2 = wcd934x_get_micb_vout_ctl_val(pdata->micbias.micb2_mv);
 	vout_ctl_3 = wcd934x_get_micb_vout_ctl_val(pdata->micbias.micb3_mv);
 	vout_ctl_4 = wcd934x_get_micb_vout_ctl_val(pdata->micbias.micb4_mv);
-
-	if (IS_ERR_VALUE(vout_ctl_1) || IS_ERR_VALUE(vout_ctl_2) ||
-	    IS_ERR_VALUE(vout_ctl_3) || IS_ERR_VALUE(vout_ctl_4)) {
+	if (vout_ctl_1 < 0 || vout_ctl_2 < 0 ||
+	    vout_ctl_3 < 0 || vout_ctl_4 < 0) {
 		rc = -EINVAL;
 		goto done;
 	}
@@ -8823,7 +8821,7 @@
 
 	pdata = dev_get_platdata(codec->dev->parent);
 	ret = tavil_handle_pdata(tavil, pdata);
-	if (IS_ERR_VALUE(ret))
+	if (ret < 0)
 		dev_err(codec->dev, "%s: invalid pdata\n", __func__);
 
 	/* Initialize MBHC module */
@@ -8907,7 +8905,7 @@
 
 	ret = wcd_cal_create_hwdep(tavil->fw_data,
 				   WCD9XXX_CODEC_HWDEP_NODE, codec);
-	if (IS_ERR_VALUE(ret)) {
+	if (ret < 0) {
 		dev_err(codec->dev, "%s hwdep failed %d\n", __func__, ret);
 		goto err_hwdep;
 	}
@@ -8927,7 +8925,7 @@
 
 	pdata = dev_get_platdata(codec->dev->parent);
 	ret = tavil_handle_pdata(tavil, pdata);
-	if (IS_ERR_VALUE(ret)) {
+	if (ret < 0) {
 		dev_err(codec->dev, "%s: bad pdata\n", __func__);
 		goto err_hwdep;
 	}
@@ -9028,7 +9026,7 @@
 	return ret;
 }
 
-static int tavil_soc_codec_remove(struct snd_soc_component *component)
+static void tavil_soc_codec_remove(struct snd_soc_component *component)
 {
 	struct snd_soc_codec *codec = snd_soc_component_to_codec(component);
 	struct wcd9xxx *control;
@@ -9046,8 +9044,6 @@
 	/* Deinitialize MBHC module */
 	tavil_mbhc_deinit(codec);
 	tavil->mbhc = NULL;
-
-	return 0;
 }
 
 static struct regmap *tavil_get_regmap(struct device *dev)
@@ -9058,12 +9054,12 @@
 }
 
 static struct snd_soc_codec_driver soc_codec_dev_tavil = {
-	.controls = tavil_snd_controls,
-	.num_controls = ARRAY_SIZE(tavil_snd_controls),
 	.get_regmap = tavil_get_regmap,
 	.component_driver = {
 		.probe = tavil_soc_codec_probe,
 		.remove = tavil_soc_codec_remove,
+		.controls = tavil_snd_controls,
+		.num_controls = ARRAY_SIZE(tavil_snd_controls),
 		.dapm_widgets = tavil_dapm_widgets,
 		.num_dapm_widgets = ARRAY_SIZE(tavil_dapm_widgets),
 		.dapm_routes = tavil_audio_map,
@@ -9319,7 +9315,7 @@
 	/* Read the master bus num from DT node */
 	rc = of_property_read_u32(node, "qcom,master-bus-num",
 				  &prop_value);
-	if (IS_ERR_VALUE(rc)) {
+	if (rc < 0) {
 		dev_err(tavil->dev, "%s: prop %s not found in node %s",
 			__func__, "qcom,master-bus-num", node->full_name);
 		goto done;
@@ -9351,7 +9347,7 @@
 
 	rc = of_property_read_u32(node, "qcom,chip-select",
 				  &prop_value);
-	if (IS_ERR_VALUE(rc)) {
+	if (rc < 0) {
 		dev_err(tavil->dev, "%s: prop %s not found in node %s",
 			__func__, "qcom,chip-select", node->full_name);
 		goto err_dt_parse;
@@ -9360,7 +9356,7 @@
 
 	rc = of_property_read_u32(node, "qcom,max-frequency",
 				  &prop_value);
-	if (IS_ERR_VALUE(rc)) {
+	if (rc < 0) {
 		dev_err(tavil->dev, "%s: prop %s not found in node %s",
 			__func__, "qcom,max-frequency", node->full_name);
 		goto err_dt_parse;
@@ -9370,7 +9366,7 @@
 	spi->dev.of_node = node;
 
 	rc = spi_add_device(spi);
-	if (IS_ERR_VALUE(rc)) {
+	if (rc < 0) {
 		dev_err(tavil->dev, "%s: spi_add_device failed\n", __func__);
 		goto err_dt_parse;
 	}
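
The wcd934x.c SPI hunks rely on of_property_read_u32() returning 0 or a negative errno, which is why rc < 0 is the right test there. A hedged sketch of the parse-and-add flow with the same property names, trimmed to the two fields shown in the hunks:

    #include <linux/of.h>
    #include <linux/spi/spi.h>

    static int demo_parse_spi_node(struct device_node *node,
                                   struct spi_device *spi)
    {
        u32 val;
        int rc;

        rc = of_property_read_u32(node, "qcom,chip-select", &val);
        if (rc < 0)
            return rc;
        spi->chip_select = val;

        rc = of_property_read_u32(node, "qcom,max-frequency", &val);
        if (rc < 0)
            return rc;
        spi->max_speed_hz = val;

        spi->dev.of_node = node;
        return spi_add_device(spi);  /* also 0 or a negative errno */
    }
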
diff --git a/sound/soc/codecs/wcd9xxx-mbhc.c b/sound/soc/codecs/wcd9xxx-mbhc.c
index 45bd84b..3754b57 100644
--- a/sound/soc/codecs/wcd9xxx-mbhc.c
+++ b/sound/soc/codecs/wcd9xxx-mbhc.c
@@ -4350,29 +4350,27 @@
 	snd_soc_update_bits(codec, WCD9XXX_A_MBHC_HPH, 0x01, 0x01);
 	INIT_WORK(&mbhc->correct_plug_swch, wcd9xxx_correct_swch_plug);
 
-	if (!IS_ERR_VALUE(ret)) {
-		snd_soc_update_bits(codec, WCD9XXX_A_RX_HPH_OCP_CTL, 0x10,
-				    0x10);
-		wcd9xxx_enable_irq(mbhc->resmgr->core_res,
-				   mbhc->intr_ids->hph_left_ocp);
-		wcd9xxx_enable_irq(mbhc->resmgr->core_res,
-				   mbhc->intr_ids->hph_right_ocp);
+	snd_soc_update_bits(codec, WCD9XXX_A_RX_HPH_OCP_CTL, 0x10,
+			    0x10);
+	wcd9xxx_enable_irq(mbhc->resmgr->core_res,
+			   mbhc->intr_ids->hph_left_ocp);
+	wcd9xxx_enable_irq(mbhc->resmgr->core_res,
+			   mbhc->intr_ids->hph_right_ocp);
 
-		/* Initialize mechanical mbhc */
-		ret = wcd9xxx_setup_jack_detect_irq(mbhc);
+	/* Initialize mechanical mbhc */
+	ret = wcd9xxx_setup_jack_detect_irq(mbhc);
 
-		if (!ret && mbhc->mbhc_cfg->gpio) {
-			/* Requested with IRQF_DISABLED */
-			enable_irq(mbhc->mbhc_cfg->gpio_irq);
+	if (!ret && mbhc->mbhc_cfg->gpio) {
+		/* Requested with IRQF_DISABLED */
+		enable_irq(mbhc->mbhc_cfg->gpio_irq);
 
-			/* Bootup time detection */
-			wcd9xxx_swch_irq_handler(mbhc);
-		} else if (!ret && mbhc->mbhc_cfg->insert_detect) {
-			pr_debug("%s: Setting up codec own insert detection\n",
-				 __func__);
-			/* Setup for insertion detection */
-			wcd9xxx_insert_detect_setup(mbhc, true);
-		}
+		/* Bootup time detection */
+		wcd9xxx_swch_irq_handler(mbhc);
+	} else if (!ret && mbhc->mbhc_cfg->insert_detect) {
+		pr_debug("%s: Setting up codec own insert detection\n",
+			 __func__);
+		/* Setup for insertion detection */
+		wcd9xxx_insert_detect_setup(mbhc, true);
 	}
 
 	pr_debug("%s: leave\n", __func__);
@@ -5536,9 +5534,6 @@
 			return ret;
 		}
 
-		set_bit(INPUT_PROP_NO_DUMMY_RELEASE,
-			mbhc->button_jack.jack->input_dev->propbit);
-
 		INIT_DELAYED_WORK(&mbhc->mbhc_firmware_dwork,
 				  wcd9xxx_mbhc_fw_read);
 		INIT_DELAYED_WORK(&mbhc->mbhc_btn_dwork, wcd9xxx_btn_lpress_fn);
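
The large wcd9xxx-mbhc.c hunk is pure restructuring: with earlier failures already returning, the !IS_ERR_VALUE(ret) wrapper is redundant and the body drops one indent level. The shape of the transform, with hypothetical helpers:

    static int demo_request_irqs(void)      { return 0; }
    static void demo_enable_ocp(void)       { }
    static int demo_setup_jack_detect(void) { return 0; }

    static int demo_mbhc_init(void)
    {
        int ret = demo_request_irqs();

        if (ret < 0)
            return ret;                   /* fail fast ... */

        demo_enable_ocp();                /* ... so the success path */
        return demo_setup_jack_detect();  /* needs no nesting */
    }
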
diff --git a/sound/soc/codecs/wcd_cpe_core.c b/sound/soc/codecs/wcd_cpe_core.c
index 7fc6d2e..c98fdc9 100644
--- a/sound/soc/codecs/wcd_cpe_core.c
+++ b/sound/soc/codecs/wcd_cpe_core.c
@@ -176,7 +176,7 @@
 		__func__, core->sfr_buf_size);
 
 	rc = cpe_svc_ramdump(core->cpe_handle, &dump_seg);
-	if (IS_ERR_VALUE(rc)) {
+	if (rc < 0) {
 		dev_err(core->dev,
 			"%s: Failed to read cpe sfr_dump, err = %d\n",
 			__func__, rc);
@@ -216,7 +216,7 @@
 		__func__);
 
 	rc = cpe_svc_ramdump(core->cpe_handle, &dump_seg);
-	if (IS_ERR_VALUE(rc)) {
+	if (rc < 0) {
 		dev_err(core->dev,
 			"%s: Failed to read CPE ramdump, err = %d\n",
 			__func__, rc);
@@ -473,7 +473,7 @@
 	wcd9xxx = dev_get_drvdata(codec->dev->parent);
 	snprintf(mdt_name, sizeof(mdt_name), "%s.mdt", core->fname);
 	ret = request_firmware(&fw, mdt_name, core->dev);
-	if (IS_ERR_VALUE(ret)) {
+	if (ret < 0) {
 		dev_err(core->dev, "firmware %s not found\n", mdt_name);
 		return ret;
 	}
@@ -491,7 +491,7 @@
 	if (load_type == ELF_FLAG_EXECUTE) {
 		/* Reset CPE first */
 		ret = cpe_svc_reset(core->cpe_handle);
-		if (IS_ERR_VALUE(ret)) {
+		if (ret < 0) {
 			dev_err(core->dev,
 				"%s: Failed to reset CPE with error %d\n",
 				__func__, ret);
@@ -534,7 +534,7 @@
 		if (load_segment) {
 			ret = wcd_cpe_load_each_segment(core,
 						phdr_idx, phdr);
-			if (IS_ERR_VALUE(ret)) {
+			if (ret < 0) {
 				dev_err(core->dev,
 					"Failed to load segment %d, aborting img dload\n",
 					phdr_idx);
@@ -666,7 +666,7 @@
 	}
 
 	ret = cpe_svc_process_irq(core->cpe_handle, CPE_IRQ_OUTBOX_IRQ);
-	if (IS_ERR_VALUE(ret))
+	if (ret < 0)
 		dev_err(core->dev,
 			"%s: Error processing irq from cpe_Services\n",
 			__func__);
@@ -822,7 +822,7 @@
 	if (enable) {
 		/* Reset CPE first */
 		ret = cpe_svc_reset(core->cpe_handle);
-		if (IS_ERR_VALUE(ret)) {
+		if (ret < 0) {
 			dev_err(core->dev,
 				"%s: CPE Reset failed, error = %d\n",
 				__func__, ret);
@@ -850,7 +850,7 @@
 		}
 
 		ret = wcd_cpe_enable_cpe_clks(core, true);
-		if (IS_ERR_VALUE(ret)) {
+		if (ret < 0) {
 			dev_err(core->dev,
 				"%s: CPE clk enable failed, err = %d\n",
 				__func__, ret);
@@ -859,7 +859,7 @@
 
 		ret = cpe_svc_boot(core->cpe_handle,
 				   core->cpe_debug_mode);
-		if (IS_ERR_VALUE(ret)) {
+		if (ret < 0) {
 			dev_err(core->dev,
 				"%s: Failed to boot CPE\n",
 				__func__);
@@ -899,7 +899,7 @@
 		}
 
 		ret = cpe_svc_shutdown(core->cpe_handle);
-		if (IS_ERR_VALUE(ret)) {
+		if (ret < 0) {
 			dev_err(core->dev,
 				"%s: CPE shutdown failed, error %d\n",
 				__func__, ret);
@@ -1036,7 +1036,7 @@
 
 	if (core->cpe_users > 0) {
 		rc = cpe_svc_process_irq(core->cpe_handle, irq);
-		if (IS_ERR_VALUE(rc))
+		if (rc < 0)
 			/*
 			 * Even if process_irq fails,
 			 * wait for cpe to move to offline state
@@ -1826,7 +1826,7 @@
 
 	/* Enable the clks for cpe */
 	ret = wcd_cpe_enable_cpe_clks(core, true);
-	if (IS_ERR_VALUE(ret)) {
+	if (ret < 0) {
 		dev_err(core->dev,
 			"%s: CPE clk enable failed, err = %d\n",
 			__func__, ret);
@@ -1835,7 +1835,7 @@
 
 	/* Get the CPE_STATUS */
 	ret = cpe_svc_ftm_test(core->cpe_handle, &cpe_ftm_test_status);
-	if (IS_ERR_VALUE(ret)) {
+	if (ret < 0) {
 		dev_err(core->dev,
 			"%s: CPE FTM test failed, err = %d\n",
 			__func__, ret);
@@ -1847,7 +1847,7 @@
 
 	/* Disable the clks for cpe */
 	ret = wcd_cpe_enable_cpe_clks(core, false);
-	if (IS_ERR_VALUE(ret)) {
+	if (ret < 0) {
 		dev_err(core->dev,
 			"%s: CPE clk disable failed, err = %d\n",
 			__func__, ret);
@@ -2007,7 +2007,7 @@
 
 	core_d = core;
 	ret = wcd_cpe_cal_init(core);
-	if (IS_ERR_VALUE(ret)) {
+	if (ret < 0) {
 		dev_err(core->dev,
 			"%s: CPE calibration init failed, err = %d\n",
 			__func__, ret);
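
The wcd_cpe_core.c hunks preserve the CPE bring-up order: reset the core, enable its clocks, then boot, unwinding the clock vote if the boot fails. A compressed sketch with stand-ins for the cpe_svc_* and clock helpers:

    #include <linux/types.h>

    static int demo_cpe_reset(void)   { return 0; }  /* stand-in */
    static int demo_cpe_clks(bool on) { return 0; }  /* stand-in */
    static int demo_cpe_boot(void)    { return 0; }  /* stand-in */

    static int demo_cpe_enable(void)
    {
        int ret;

        ret = demo_cpe_reset();
        if (ret < 0)
            return ret;
        ret = demo_cpe_clks(true);
        if (ret < 0)
            return ret;
        ret = demo_cpe_boot();
        if (ret < 0)
            demo_cpe_clks(false);   /* unwind the clock vote */
        return ret;
    }
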
diff --git a/sound/soc/codecs/wsa881x-analog.c b/sound/soc/codecs/wsa881x-analog.c
index cee31964..2d87db1 100644
--- a/sound/soc/codecs/wsa881x-analog.c
+++ b/sound/soc/codecs/wsa881x-analog.c
@@ -1074,7 +1074,7 @@
 	return 0;
 }
 
-static int wsa881x_remove(struct snd_soc_component *component)
+static void wsa881x_remove(struct snd_soc_component *component)
 {
 	struct snd_soc_codec *codec = snd_soc_component_to_codec(component);
 	struct wsa881x_pdata *wsa881x = snd_soc_codec_get_drvdata(codec);
@@ -1084,7 +1084,6 @@
 
 	mutex_destroy(&wsa881x->bg_lock);
 	mutex_destroy(&wsa881x->res_lock);
-	return 0;
 }
 
 static struct snd_soc_codec_driver soc_codec_dev_wsa881x = {
@@ -1095,13 +1094,12 @@
 	.reg_cache_default = wsa881x_ana_reg_defaults,
 	.reg_word_size = 1,
 
-	.controls = wsa881x_snd_controls,
-	.num_controls = ARRAY_SIZE(wsa881x_snd_controls),
-
 	.component_driver = {
 		.probe	= wsa881x_probe,
 		.remove	= wsa881x_remove,
 
+		.controls = wsa881x_snd_controls,
+		.num_controls = ARRAY_SIZE(wsa881x_snd_controls),
 		.dapm_widgets = wsa881x_dapm_widgets,
 		.num_dapm_widgets = ARRAY_SIZE(wsa881x_dapm_widgets),
 		.dapm_routes = wsa881x_audio_map,
diff --git a/sound/soc/codecs/wsa881x.c b/sound/soc/codecs/wsa881x.c
index a23ce7c..c76d335 100644
--- a/sound/soc/codecs/wsa881x.c
+++ b/sound/soc/codecs/wsa881x.c
@@ -253,9 +253,9 @@
 	snprintf(name, sizeof(name), "%s.%x", "wsa881x",
 		 (u32)wsa881x->swr_slave->addr);
 
-	wsa881x->entry = snd_register_module_info(codec_root->module,
-						  (const char *)name,
-						  codec_root);
+	wsa881x->entry = snd_info_create_subdir(codec_root->module,
+						(const char *)name,
+						codec_root);
 	if (!wsa881x->entry) {
 		dev_dbg(codec->dev, "%s: failed to create wsa881x entry\n",
 			__func__);
@@ -1053,7 +1053,7 @@
 	return 0;
 }
 
-static int wsa881x_remove(struct snd_soc_component *component)
+static void wsa881x_remove(struct snd_soc_component *component)
 {
 	struct snd_soc_codec *codec = snd_soc_component_to_codec(component);
 	struct wsa881x_priv *wsa881x = snd_soc_codec_get_drvdata(codec);
@@ -1062,8 +1062,6 @@
 		wsa881x_deinit_thermal(wsa881x->tz_pdata.tz_dev);
 	mutex_destroy(&wsa881x->bg_lock);
 	mutex_destroy(&wsa881x->res_lock);
-
-	return 0;
 }
 
 static struct regmap *wsa881x_get_regmap(struct device *dev)
@@ -1077,12 +1075,12 @@
 }
 
 static struct snd_soc_codec_driver soc_codec_dev_wsa881x = {
-	.controls = wsa881x_snd_controls,
-	.num_controls = ARRAY_SIZE(wsa881x_snd_controls),
 	.get_regmap = wsa881x_get_regmap,
 	.component_driver = {
 		.probe = wsa881x_probe,
 		.remove = wsa881x_remove,
+		.controls = wsa881x_snd_controls,
+		.num_controls = ARRAY_SIZE(wsa881x_snd_controls),
 		.dapm_widgets = wsa881x_dapm_widgets,
 		.num_dapm_widgets = ARRAY_SIZE(wsa881x_dapm_widgets),
 		.dapm_routes = wsa881x_audio_map,
diff --git a/sound/soc/msm/Kconfig b/sound/soc/msm/Kconfig
index d0c9c07..b3ac439 100644
--- a/sound/soc/msm/Kconfig
+++ b/sound/soc/msm/Kconfig
@@ -248,7 +248,6 @@
 	select MSM_QDSP6V2_CODECS
 	select SND_SOC_WCD934X
 	select SND_SOC_WSA881X
-	select SND_SOC_MSM_HDMI_CODEC_RX
 	select DTS_SRS_TM
 	select QTI_PP
 	select MSM_ULTRASOUND
diff --git a/sound/soc/msm/msm-cpe-lsm.c b/sound/soc/msm/msm-cpe-lsm.c
index 71b2598..44927c8 100644
--- a/sound/soc/msm/msm-cpe-lsm.c
+++ b/sound/soc/msm/msm-cpe-lsm.c
@@ -1119,7 +1119,7 @@
 		if (session->lab_enable) {
 			rc = msm_cpe_lab_buf_alloc(substream,
 						   session, dma_data);
-			if (IS_ERR_VALUE(rc)) {
+			if (rc < 0) {
 				dev_err(rtd->dev,
 					"%s: lab buffer alloc failed, err = %d\n",
 					__func__, rc);
@@ -1138,7 +1138,7 @@
 						   &substream->dma_buffer);
 			rc = lsm_ops->lsm_lab_control(cpe->core_handle,
 					session, true);
-			if (IS_ERR_VALUE(rc)) {
+			if (rc < 0) {
 				dev_err(rtd->dev,
 					"%s: Lab Enable Failed rc %d\n",
 					__func__, rc);
@@ -1152,7 +1152,7 @@
 			 * the lab buffer.
 			 */
 			rc = msm_cpe_lsm_lab_stop(substream);
-			if (IS_ERR_VALUE(rc)) {
+			if (rc < 0) {
 				dev_err(rtd->dev,
 					"%s: LAB stop failed, error = %d\n",
 					__func__, rc);
@@ -1164,7 +1164,7 @@
 			 */
 			rc = msm_cpe_lab_buf_dealloc(substream,
 						     session, dma_data);
-			if (IS_ERR_VALUE(rc)) {
+			if (rc < 0) {
 				dev_err(rtd->dev,
 					"%s: lab buffer free failed, err = %d\n",
 					__func__, rc);
@@ -1289,7 +1289,7 @@
 			 */
 			rc = msm_cpe_lab_buf_dealloc(substream,
 						session, dma_data);
-			if (IS_ERR_VALUE(rc))
+			if (rc < 0)
 				dev_err(rtd->dev,
 					"%s: lab buffer free failed, err = %d\n",
 					__func__, rc);
diff --git a/sound/soc/msm/msm8998.c b/sound/soc/msm/msm8998.c
index a6e0c04..73755ed 100644
--- a/sound/soc/msm/msm8998.c
+++ b/sound/soc/msm/msm8998.c
@@ -2921,7 +2921,7 @@
 	case MSM_BACKEND_DAI_HDMI_RX:
 	case MSM_BACKEND_DAI_DISPLAY_PORT_RX:
 		idx = msm_ext_disp_get_idx_from_beid(dai_link->id);
-		if (IS_ERR_VALUE(idx)) {
+		if (idx < 0) {
 			pr_err("%s: Incorrect ext disp idx %d\n",
 			       __func__, idx);
 			rc = idx;
@@ -3445,9 +3445,10 @@
 					struct snd_soc_component, list_aux);
 			if (!strcmp(aux_comp->name, WSA8810_NAME_1) ||
 			    !strcmp(aux_comp->name, WSA8810_NAME_2)) {
-				tavil_set_spkr_mode(rtd->codec, SPKR_MODE_1);
+				tavil_set_spkr_mode(rtd->codec,
+						    WCD934X_SPKR_MODE_1);
 				tavil_set_spkr_gain_offset(rtd->codec,
-							RX_GAIN_OFFSET_M1P5_DB);
+						WCD934X_RX_GAIN_OFFSET_M1P5_DB);
 			}
 		}
 		card = rtd->card->snd_card;
@@ -3795,7 +3796,6 @@
 	struct snd_soc_pcm_runtime *rtd = substream->private_data;
 	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
 	int index = cpu_dai->id - 1;
-	return ret = 0;
 
 	dev_dbg(rtd->card->dev,
 		"%s: substream = %s  stream = %d, dai name %s, dai ID %d\n",
@@ -3824,7 +3824,7 @@
 			ret = -EINVAL;
 		}
 	}
-	if (IS_ERR_VALUE(ret))
+	if (ret < 0)
 		auxpcm_intf_conf[index].ref_cnt--;
 
 	mutex_unlock(&auxpcm_intf_conf[index].lock);
@@ -3952,7 +3952,7 @@
 	int index = cpu_dai->id;
 
 	port_id = msm_get_port_id(rtd->dai_link->id);
-	if (IS_ERR_VALUE(port_id)) {
+	if (port_id < 0) {
 		dev_err(rtd->card->dev, "%s: Invalid port_id\n", __func__);
 		ret = port_id;
 		goto done;
@@ -4006,7 +4006,7 @@
 	mutex_lock(&mi2s_intf_conf[index].lock);
 	if (++mi2s_intf_conf[index].ref_cnt == 1) {
 		ret = msm_mi2s_set_sclk(substream, true);
-		if (IS_ERR_VALUE(ret)) {
+		if (ret < 0) {
 			dev_err(rtd->card->dev,
 				"%s: afe lpass clock failed to enable MI2S clock, err:%d\n",
 				__func__, ret);
@@ -4028,17 +4028,17 @@
 		if (!mi2s_intf_conf[index].msm_is_mi2s_master)
 			fmt = SND_SOC_DAIFMT_CBM_CFM;
 		ret = snd_soc_dai_set_fmt(cpu_dai, fmt);
-		if (IS_ERR_VALUE(ret)) {
+		if (ret < 0) {
 			pr_err("%s: set fmt cpu dai failed for MI2S (%d), err:%d\n",
 				__func__, index, ret);
 			goto clk_off;
 		}
 	}
 clk_off:
-	if (IS_ERR_VALUE(ret))
+	if (ret < 0)
 		msm_mi2s_set_sclk(substream, false);
 clean_up:
-	if (IS_ERR_VALUE(ret))
+	if (ret < 0)
 		mi2s_intf_conf[index].ref_cnt--;
 	mutex_unlock(&mi2s_intf_conf[index].lock);
 done:
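
The msm8998.c hunks also delete a stray "return ret = 0;" that had made the rest of the AUXPCM startup unreachable. The MI2S startup path itself keeps its ref-counted unwind: the first opener enables the bit clock and sets the DAI format, and each failure rolls back exactly what succeeded. A sketch of that shape under illustrative names:

    #include <linux/mutex.h>
    #include <linux/types.h>

    struct intf_conf {
        struct mutex lock;
        int ref_cnt;
    };

    static int demo_set_sclk(bool on) { return 0; }  /* stand-in */
    static int demo_set_fmt(void)     { return 0; }  /* stand-in */

    static int demo_mi2s_startup(struct intf_conf *conf)
    {
        int ret = 0;

        mutex_lock(&conf->lock);
        if (++conf->ref_cnt == 1) {
            ret = demo_set_sclk(true);
            if (ret < 0)
                goto clean_up;      /* clock never came up */
            ret = demo_set_fmt();
            if (ret < 0)
                goto clk_off;       /* undo the clock enable */
        }
    clk_off:
        if (ret < 0)
            demo_set_sclk(false);
    clean_up:
        if (ret < 0)
            conf->ref_cnt--;
        mutex_unlock(&conf->lock);
        return ret;
    }
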
diff --git a/sound/soc/msm/msmfalcon-common.c b/sound/soc/msm/msmfalcon-common.c
index 5928982..fba9c28 100644
--- a/sound/soc/msm/msmfalcon-common.c
+++ b/sound/soc/msm/msmfalcon-common.c
@@ -1920,7 +1920,7 @@
 			ret = -EINVAL;
 		}
 	}
-	if (IS_ERR_VALUE(ret))
+	if (ret < 0)
 		auxpcm_intf_conf[index].ref_cnt--;
 
 	mutex_unlock(&auxpcm_intf_conf[index].lock);
@@ -2055,7 +2055,7 @@
 	int index = cpu_dai->id;
 
 	port_id = msm_get_port_id(rtd->dai_link->be_id);
-	if (IS_ERR_VALUE(port_id)) {
+	if (port_id < 0) {
 		dev_err(rtd->card->dev, "%s: Invalid port_id\n", __func__);
 		ret = port_id;
 		goto done;
@@ -2116,7 +2116,7 @@
 	mutex_lock(&mi2s_intf_conf[index].lock);
 	if (++mi2s_intf_conf[index].ref_cnt == 1) {
 		ret = msm_mi2s_set_sclk(substream, true);
-		if (IS_ERR_VALUE(ret)) {
+		if (ret < 0) {
 			dev_err(rtd->card->dev,
 				"%s: afe lpass clock failed to enable MI2S clock, err:%d\n",
 				__func__, ret);
@@ -2138,7 +2138,7 @@
 		if (!mi2s_intf_conf[index].msm_is_mi2s_master)
 			fmt = SND_SOC_DAIFMT_CBM_CFM;
 		ret = snd_soc_dai_set_fmt(cpu_dai, fmt);
-		if (IS_ERR_VALUE(ret)) {
+		if (ret < 0) {
 			dev_err(rtd->card->dev,
 				"%s: set fmt cpu dai failed for MI2S (%d), err:%d\n",
 				__func__, index, ret);
@@ -2146,10 +2146,10 @@
 		}
 	}
 clk_off:
-	if (IS_ERR_VALUE(ret))
+	if (ret < 0)
 		msm_mi2s_set_sclk(substream, false);
 clean_up:
-	if (IS_ERR_VALUE(ret))
+	if (ret < 0)
 		mi2s_intf_conf[index].ref_cnt--;
 	mutex_unlock(&mi2s_intf_conf[index].lock);
 done:
diff --git a/sound/soc/msm/msmfalcon-internal.c b/sound/soc/msm/msmfalcon-internal.c
index 34db650..50efd69 100644
--- a/sound/soc/msm/msmfalcon-internal.c
+++ b/sound/soc/msm/msmfalcon-internal.c
@@ -1067,7 +1067,7 @@
 	int index;
 
 	port_id = int_mi2s_get_port_id(rtd->dai_link->be_id);
-	if (IS_ERR_VALUE(port_id)) {
+	if (port_id < 0) {
 		dev_err(rtd->card->dev, "%s: Invalid port_id\n", __func__);
 		ret = port_id;
 		goto done;
diff --git a/sound/soc/msm/msmskunk.c b/sound/soc/msm/msmskunk.c
index 7920195..4759235 100644
--- a/sound/soc/msm/msmskunk.c
+++ b/sound/soc/msm/msmskunk.c
@@ -2860,7 +2860,7 @@
 
 	case MSM_BACKEND_DAI_DISPLAY_PORT_RX:
 		idx = msm_ext_disp_get_idx_from_beid(dai_link->id);
-		if (IS_ERR_VALUE(idx)) {
+		if (idx < 0) {
 			pr_err("%s: Incorrect ext disp idx %d\n",
 			       __func__, idx);
 			rc = idx;
@@ -3342,9 +3342,9 @@
 				struct snd_soc_component, list_aux);
 		if (!strcmp(aux_comp->name, WSA8810_NAME_1) ||
 		    !strcmp(aux_comp->name, WSA8810_NAME_2)) {
-			tavil_set_spkr_mode(rtd->codec, SPKR_MODE_1);
+			tavil_set_spkr_mode(rtd->codec, WCD934X_SPKR_MODE_1);
 			tavil_set_spkr_gain_offset(rtd->codec,
-						RX_GAIN_OFFSET_M1P5_DB);
+					WCD934X_RX_GAIN_OFFSET_M1P5_DB);
 		}
 	}
 	card = rtd->card->snd_card;
@@ -3589,7 +3589,6 @@
 	struct snd_soc_pcm_runtime *rtd = substream->private_data;
 	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
 	int index = cpu_dai->id - 1;
-	return ret = 0;
 
 	dev_dbg(rtd->card->dev,
 		"%s: substream = %s  stream = %d, dai name %s, dai ID %d\n",
@@ -3618,7 +3617,7 @@
 			ret = -EINVAL;
 		}
 	}
-	if (IS_ERR_VALUE(ret))
+	if (ret < 0)
 		auxpcm_intf_conf[index].ref_cnt--;
 
 	mutex_unlock(&auxpcm_intf_conf[index].lock);
@@ -3746,7 +3745,7 @@
 	int index = cpu_dai->id;
 
 	port_id = msm_get_port_id(rtd->dai_link->id);
-	if (IS_ERR_VALUE(port_id)) {
+	if (port_id < 0) {
 		dev_err(rtd->card->dev, "%s: Invalid port_id\n", __func__);
 		ret = port_id;
 		goto err;
@@ -3800,7 +3799,7 @@
 	mutex_lock(&mi2s_intf_conf[index].lock);
 	if (++mi2s_intf_conf[index].ref_cnt == 1) {
 		ret = msm_mi2s_set_sclk(substream, true);
-		if (IS_ERR_VALUE(ret)) {
+		if (ret < 0) {
 			dev_err(rtd->card->dev,
 				"%s: afe lpass clock failed to enable MI2S clock, err:%d\n",
 				__func__, ret);
@@ -3822,17 +3821,17 @@
 		if (!mi2s_intf_conf[index].msm_is_mi2s_master)
 			fmt = SND_SOC_DAIFMT_CBM_CFM;
 		ret = snd_soc_dai_set_fmt(cpu_dai, fmt);
-		if (IS_ERR_VALUE(ret)) {
+		if (ret < 0) {
 			pr_err("%s: set fmt cpu dai failed for MI2S (%d), err:%d\n",
 				__func__, index, ret);
 			goto clk_off;
 		}
 	}
 clk_off:
-	if (IS_ERR_VALUE(ret))
+	if (ret < 0)
 		msm_mi2s_set_sclk(substream, false);
 clean_up:
-	if (IS_ERR_VALUE(ret))
+	if (ret < 0)
 		mi2s_intf_conf[index].ref_cnt--;
 	mutex_unlock(&mi2s_intf_conf[index].lock);
 err:
diff --git a/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c
index 9ea69ab..48f58f1 100644
--- a/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c
@@ -1145,7 +1145,7 @@
 		}
 		prtd->gapless_state.stream_opened[stream_index] = 1;
 
-		pr_debug("%s: BE id %d\n", __func__, soc_prtd->dai_link->be_id);
+		pr_debug("%s: BE id %d\n", __func__, soc_prtd->dai_link->id);
 		ret = msm_pcm_routing_reg_phy_stream(soc_prtd->dai_link->id,
 				ac->perf_mode,
 				prtd->session_id,
diff --git a/sound/soc/msm/qdsp6v2/msm-dai-q6-hdmi-v2.c b/sound/soc/msm/qdsp6v2/msm-dai-q6-hdmi-v2.c
index d4118b0..dffac45 100644
--- a/sound/soc/msm/qdsp6v2/msm-dai-q6-hdmi-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-dai-q6-hdmi-v2.c
@@ -222,8 +222,7 @@
 	}
 
 	rc = afe_close(dai->id); /* can block */
-
-	if (IS_ERR_VALUE(rc))
+	if (rc < 0)
 		dev_err(dai->dev, "fail to close AFE port\n");
 
 	pr_debug("%s: dai_data->status_mask = %ld\n", __func__,
@@ -246,7 +245,7 @@
 	if (!test_bit(STATUS_PORT_STARTED, dai_data->status_mask)) {
 		rc = afe_port_start(dai->id, &dai_data->port_config,
 				    dai_data->rate);
-		if (IS_ERR_VALUE(rc))
+		if (rc < 0)
 			dev_err(dai->dev, "fail to open AFE port %x\n",
 				dai->id);
 		else
@@ -351,8 +350,7 @@
 	/* If AFE port is still up, close it */
 	if (test_bit(STATUS_PORT_STARTED, dai_data->status_mask)) {
 		rc = afe_close(dai->id); /* can block */
-
-		if (IS_ERR_VALUE(rc))
+		if (rc < 0)
 			dev_err(dai->dev, "fail to close AFE port\n");
 
 		clear_bit(STATUS_PORT_STARTED, dai_data->status_mask);
diff --git a/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c
index e3e2e7e..52c2296 100644
--- a/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c
@@ -19,7 +19,6 @@
 #include <linux/slab.h>
 #include <linux/clk.h>
 #include <linux/of_device.h>
-#include <linux/clk/msm-clk.h>
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
@@ -772,11 +771,11 @@
 			__func__, dai->id);
 
 	rc = afe_close(aux_dai_data->rx_pid); /* can block */
-	if (IS_ERR_VALUE(rc))
+	if (rc < 0)
 		dev_err(dai->dev, "fail to close PCM_RX  AFE port\n");
 
 	rc = afe_close(aux_dai_data->tx_pid);
-	if (IS_ERR_VALUE(rc))
+	if (rc < 0)
 		dev_err(dai->dev, "fail to close AUX PCM TX port\n");
 
 	msm_dai_q6_auxpcm_set_clk(aux_dai_data, aux_dai_data->rx_pid, false);
@@ -827,7 +826,7 @@
 			__func__, dai->id);
 
 	rc = afe_q6_interface_prepare();
-	if (IS_ERR_VALUE(rc)) {
+	if (rc < 0) {
 		dev_err(dai->dev, "fail to open AFE APR\n");
 		goto fail;
 	}
@@ -979,10 +978,10 @@
 	if (test_bit(STATUS_TX_PORT, aux_dai_data->auxpcm_port_status) ||
 	    test_bit(STATUS_RX_PORT, aux_dai_data->auxpcm_port_status)) {
 		rc = afe_close(aux_dai_data->rx_pid); /* can block */
-		if (IS_ERR_VALUE(rc))
+		if (rc < 0)
 			dev_err(dai->dev, "fail to close AUXPCM RX AFE port\n");
 		rc = afe_close(aux_dai_data->tx_pid);
-		if (IS_ERR_VALUE(rc))
+		if (rc < 0)
 			dev_err(dai->dev, "fail to close AUXPCM TX AFE port\n");
 		clear_bit(STATUS_TX_PORT, aux_dai_data->auxpcm_port_status);
 		clear_bit(STATUS_RX_PORT, aux_dai_data->auxpcm_port_status);
@@ -1274,8 +1273,7 @@
 	}
 
 	rc = afe_close(dai->id);
-
-	if (IS_ERR_VALUE(rc))
+	if (rc < 0)
 		dev_err(dai->dev, "fail to close AFE port\n");
 
 	pr_debug("%s: dai_data->status_mask = %ld\n", __func__,
@@ -1291,14 +1289,10 @@
 	struct msm_dai_q6_spdif_dai_data *dai_data = dev_get_drvdata(dai->dev);
 	int rc = 0;
 
-	if (IS_ERR_VALUE(rc)) {
-		dev_err(dai->dev, "%s: clk_config failed", __func__);
-		return rc;
-	}
 	if (!test_bit(STATUS_PORT_STARTED, dai_data->status_mask)) {
 		rc = afe_spdif_port_start(dai->id, &dai_data->spdif_port,
 				dai_data->rate);
-		if (IS_ERR_VALUE(rc))
+		if (rc < 0)
 			dev_err(dai->dev, "fail to open AFE port 0x%x\n",
 					dai->id);
 		else
@@ -1373,8 +1367,7 @@
 	/* If AFE port is still up, close it */
 	if (test_bit(STATUS_PORT_STARTED, dai_data->status_mask)) {
 		rc = afe_close(dai->id); /* can block */
-
-		if (IS_ERR_VALUE(rc))
+		if (rc < 0)
 			dev_err(dai->dev, "fail to close AFE port\n");
 
 		clear_bit(STATUS_PORT_STARTED, dai_data->status_mask);
@@ -1442,7 +1435,7 @@
 			rc = afe_port_start(dai->id, &dai_data->port_config,
 						dai_data->rate);
 		}
-		if (IS_ERR_VALUE(rc))
+		if (rc < 0)
 			dev_err(dai->dev, "fail to open AFE port 0x%x\n",
 				dai->id);
 		else
@@ -1809,8 +1802,7 @@
 	if (test_bit(STATUS_PORT_STARTED, dai_data->status_mask)) {
 		pr_debug("%s: stop pseudo port:%d\n", __func__,  dai->id);
 		rc = afe_close(dai->id); /* can block */
-
-		if (IS_ERR_VALUE(rc))
+		if (rc < 0)
 			dev_err(dai->dev, "fail to close AFE port\n");
 		pr_debug("%s: dai_data->status_mask = %ld\n", __func__,
 			*dai_data->status_mask);
@@ -2375,7 +2367,7 @@
 				 dai_data));
 		break;
 	}
-	if (IS_ERR_VALUE(rc))
+	if (rc < 0)
 		dev_err(dai->dev, "%s: err add config ctl, DAI = %s\n",
 			__func__, dai->name);
 
@@ -2394,8 +2386,7 @@
 	if (test_bit(STATUS_PORT_STARTED, dai_data->status_mask)) {
 		pr_debug("%s: stop pseudo port:%d\n", __func__,  dai->id);
 		rc = afe_close(dai->id); /* can block */
-
-		if (IS_ERR_VALUE(rc))
+		if (rc < 0)
 			dev_err(dai->dev, "fail to close AFE port\n");
 		clear_bit(STATUS_PORT_STARTED, dai_data->status_mask);
 	}
@@ -3382,8 +3373,7 @@
 		kcontrol = snd_ctl_new1(ctrl,
 					&mi2s_dai_data->rx_dai.mi2s_dai_data);
 		rc = snd_ctl_add(dai->component->card->snd_card, kcontrol);
-
-		if (IS_ERR_VALUE(rc)) {
+		if (rc < 0) {
 			dev_err(dai->dev, "%s: err add RX fmt ctl DAI = %s\n",
 				__func__, dai->name);
 			goto rtn;
@@ -3410,8 +3400,7 @@
 		rc = snd_ctl_add(dai->component->card->snd_card,
 				snd_ctl_new1(ctrl,
 				&mi2s_dai_data->tx_dai.mi2s_dai_data));
-
-		if (IS_ERR_VALUE(rc)) {
+		if (rc < 0) {
 			if (kcontrol)
 				snd_ctl_remove(dai->component->card->snd_card,
 						kcontrol);
@@ -3435,7 +3424,7 @@
 	if (test_bit(STATUS_PORT_STARTED,
 		     mi2s_dai_data->rx_dai.mi2s_dai_data.status_mask)) {
 		rc = afe_close(MI2S_RX); /* can block */
-		if (IS_ERR_VALUE(rc))
+		if (rc < 0)
 			dev_err(dai->dev, "fail to close MI2S_RX port\n");
 		clear_bit(STATUS_PORT_STARTED,
 			  mi2s_dai_data->rx_dai.mi2s_dai_data.status_mask);
@@ -3443,7 +3432,7 @@
 	if (test_bit(STATUS_PORT_STARTED,
 		     mi2s_dai_data->tx_dai.mi2s_dai_data.status_mask)) {
 		rc = afe_close(MI2S_TX); /* can block */
-		if (IS_ERR_VALUE(rc))
+		if (rc < 0)
 			dev_err(dai->dev, "fail to close MI2S_TX port\n");
 		clear_bit(STATUS_PORT_STARTED,
 			  mi2s_dai_data->tx_dai.mi2s_dai_data.status_mask);
@@ -3598,8 +3587,7 @@
 		 */
 		rc = afe_port_start(port_id, &dai_data->port_config,
 				    dai_data->rate);
-
-		if (IS_ERR_VALUE(rc))
+		if (rc < 0)
 			dev_err(dai->dev, "fail to open AFE port 0x%x\n",
 				dai->id);
 		else
@@ -3820,7 +3808,7 @@
 
 	if (test_bit(STATUS_PORT_STARTED, dai_data->status_mask)) {
 		rc = afe_close(port_id);
-		if (IS_ERR_VALUE(rc))
+		if (rc < 0)
 			dev_err(dai->dev, "fail to close AFE port\n");
 		clear_bit(STATUS_PORT_STARTED, dai_data->status_mask);
 	}
@@ -4293,8 +4281,7 @@
 
 	rc = msm_dai_q6_mi2s_get_lineconfig(mi2s_pdata->rx_sd_lines,
 					    &sd_line, &ch_cnt);
-
-	if (IS_ERR_VALUE(rc)) {
+	if (rc < 0) {
 		dev_err(&pdev->dev, "invalid MI2S RX sd line config\n");
 		goto rtn;
 	}
@@ -4311,8 +4298,7 @@
 	}
 	rc = msm_dai_q6_mi2s_get_lineconfig(mi2s_pdata->tx_sd_lines,
 					    &sd_line, &ch_cnt);
-
-	if (IS_ERR_VALUE(rc)) {
+	if (rc < 0) {
 		dev_err(&pdev->dev, "invalid MI2S TX sd line config\n");
 		goto rtn;
 	}
@@ -4412,13 +4398,12 @@
 
 	rc = msm_dai_q6_mi2s_platform_data_validation(pdev,
 			&msm_dai_q6_mi2s_dai[mi2s_intf]);
-	if (IS_ERR_VALUE(rc))
+	if (rc < 0)
 		goto free_dai_data;
 
 	rc = snd_soc_register_component(&pdev->dev, &msm_q6_mi2s_dai_component,
 	&msm_dai_q6_mi2s_dai[mi2s_intf], 1);
-
-	if (IS_ERR_VALUE(rc))
+	if (rc < 0)
 		goto err_register;
 	return 0;
 
@@ -5772,8 +5757,7 @@
 					tdm_dai_data);
 		rc = snd_ctl_add(dai->component->card->snd_card,
 				 data_format_kcontrol);
-
-		if (IS_ERR_VALUE(rc)) {
+		if (rc < 0) {
 			dev_err(dai->dev, "%s: err add data format ctrl DAI = %s\n",
 				__func__, dai->name);
 			goto rtn;
@@ -5785,8 +5769,7 @@
 					tdm_dai_data);
 		rc = snd_ctl_add(dai->component->card->snd_card,
 				 header_type_kcontrol);
-
-		if (IS_ERR_VALUE(rc)) {
+		if (rc < 0) {
 			if (data_format_kcontrol)
 				snd_ctl_remove(dai->component->card->snd_card,
 					data_format_kcontrol);
@@ -5801,8 +5784,7 @@
 					tdm_dai_data);
 		rc = snd_ctl_add(dai->component->card->snd_card,
 				 header_kcontrol);
-
-		if (IS_ERR_VALUE(rc)) {
+		if (rc < 0) {
 			if (header_type_kcontrol)
 				snd_ctl_remove(dai->component->card->snd_card,
 					header_type_kcontrol);
@@ -5843,7 +5825,7 @@
 	/* If AFE port is still up, close it */
 	if (test_bit(STATUS_PORT_STARTED, tdm_dai_data->status_mask)) {
 		rc = afe_close(dai->id); /* can block */
-		if (IS_ERR_VALUE(rc)) {
+		if (rc < 0) {
 			dev_err(dai->dev, "%s: fail to close AFE port 0x%x\n",
 				__func__, dai->id);
 		}
@@ -5854,13 +5836,13 @@
 		if (atomic_read(group_ref) == 0) {
 			rc = afe_port_group_enable(group_id,
 				NULL, false);
-			if (IS_ERR_VALUE(rc)) {
+			if (rc < 0) {
 				dev_err(dai->dev, "fail to disable AFE group 0x%x\n",
 					group_id);
 			}
 			rc = msm_dai_q6_tdm_set_clk(tdm_dai_data,
 				dai->id, false);
-			if (IS_ERR_VALUE(rc)) {
+			if (rc < 0) {
 				dev_err(dai->dev, "%s: fail to disable AFE clk 0x%x\n",
 					__func__, dai->id);
 			}
@@ -6308,14 +6290,14 @@
 			 */
 			rc = msm_dai_q6_tdm_set_clk(dai_data,
 				dai->id, true);
-			if (IS_ERR_VALUE(rc)) {
+			if (rc < 0) {
 				dev_err(dai->dev, "%s: fail to enable AFE clk 0x%x\n",
 					__func__, dai->id);
 				goto rtn;
 			}
 			rc = afe_port_group_enable(group_id,
 				&dai_data->group_cfg, true);
-			if (IS_ERR_VALUE(rc)) {
+			if (rc < 0) {
 				dev_err(dai->dev, "%s: fail to enable AFE group 0x%x\n",
 					__func__, group_id);
 				goto rtn;
@@ -6324,7 +6306,7 @@
 
 		rc = afe_tdm_port_start(dai->id, &dai_data->port_cfg,
 			dai_data->rate);
-		if (IS_ERR_VALUE(rc)) {
+		if (rc < 0) {
 			if (atomic_read(group_ref) == 0) {
 				afe_port_group_enable(group_id,
 					NULL, false);
@@ -6372,7 +6354,7 @@
 
 	if (test_bit(STATUS_PORT_STARTED, dai_data->status_mask)) {
 		rc = afe_close(dai->id);
-		if (IS_ERR_VALUE(rc)) {
+		if (rc < 0) {
 			dev_err(dai->dev, "%s: fail to close AFE port 0x%x\n",
 				__func__, dai->id);
 		}
@@ -6383,13 +6365,13 @@
 		if (atomic_read(group_ref) == 0) {
 			rc = afe_port_group_enable(group_id,
 				NULL, false);
-			if (IS_ERR_VALUE(rc)) {
+			if (rc < 0) {
 				dev_err(dai->dev, "%s: fail to disable AFE group 0x%x\n",
 					__func__, group_id);
 			}
 			rc = msm_dai_q6_tdm_set_clk(dai_data,
 				dai->id, false);
-			if (IS_ERR_VALUE(rc)) {
+			if (rc < 0) {
 				dev_err(dai->dev, "%s: fail to disable AFE clk 0x%x\n",
 					__func__, dai->id);
 			}
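
The TDM hunks in msm-dai-q6-v2.c gate the shared group clock and the AFE group enable on a per-group atomic refcount: the first stream up enables both, the last one down disables them. A sketch of the bring-up half, with stand-ins for the AFE calls:

    #include <linux/atomic.h>
    #include <linux/types.h>

    static int demo_group_clk(bool on)    { return 0; }  /* stand-in */
    static int demo_group_enable(bool on) { return 0; }  /* stand-in */

    static int demo_tdm_prepare(atomic_t *group_ref)
    {
        int rc = 0;

        if (atomic_read(group_ref) == 0) {
            rc = demo_group_clk(true);
            if (rc < 0)
                return rc;
            rc = demo_group_enable(true);
            if (rc < 0) {
                demo_group_clk(false);  /* roll the clock back */
                return rc;
            }
        }
        atomic_inc(group_ref);
        return 0;
    }
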
diff --git a/sound/soc/msm/qdsp6v2/msm-dai-slim.c b/sound/soc/msm/qdsp6v2/msm-dai-slim.c
index e012cf2..77fb8d4 100644
--- a/sound/soc/msm/qdsp6v2/msm-dai-slim.c
+++ b/sound/soc/msm/qdsp6v2/msm-dai-slim.c
@@ -130,8 +130,7 @@
 					 SLIM_REQ_DEFAULT, dai_data->ch_cnt,
 					 &(dma_data->ph),
 					 sizeof(dma_data->ph));
-
-		if (IS_ERR_VALUE(rc)) {
+		if (rc < 0) {
 			dev_err(&sdev->dev,
 				"%s:alloc mgrport failed rc %d\n",
 				__func__, rc);
@@ -141,7 +140,7 @@
 		rc = slim_config_mgrports(sdev, &(dma_data->ph),
 					  dai_data->ch_cnt,
 					  &(dai_data->port_cfg));
-		if (IS_ERR_VALUE(rc)) {
+		if (rc < 0) {
 			dev_err(&sdev->dev,
 				"%s: config mgrport failed rc %d\n",
 				__func__, rc);
@@ -152,7 +151,7 @@
 			rc = slim_connect_sink(sdev,
 					       &dma_data->ph, 1,
 					       dai_data->chan_h[i]);
-			if (IS_ERR_VALUE(rc)) {
+			if (rc < 0) {
 				dev_err(&sdev->dev,
 					"%s: slim_connect_sink failed, ch = %d, err = %d\n",
 					__func__, i, rc);
@@ -163,7 +162,7 @@
 		rc = slim_control_ch(sdev,
 				     dai_data->grph,
 				     SLIM_CH_ACTIVATE, true);
-		if (IS_ERR_VALUE(rc)) {
+		if (rc < 0) {
 			dev_err(&sdev->dev,
 				"%s: slim activate ch failed, err = %d\n",
 				__func__, rc);
@@ -182,7 +181,7 @@
 		rc = slim_control_ch(sdev,
 				     dai_data->grph,
 				     SLIM_CH_REMOVE, true);
-		if (IS_ERR_VALUE(rc)) {
+		if (rc < 0) {
 			dev_err(&sdev->dev,
 				"%s: slim activate ch failed, err = %d\n",
 				__func__, rc);
@@ -191,7 +190,7 @@
 
 		rc = slim_dealloc_mgrports(sdev,
 					   &dma_data->ph, 1);
-		if (IS_ERR_VALUE(rc)) {
+		if (rc < 0) {
 			dev_err(&sdev->dev,
 				"%s: dealloc mgrport failed, err = %d\n",
 				__func__, rc);
@@ -206,7 +205,7 @@
 err_done:
 	rc1 = slim_dealloc_mgrports(sdev,
 				   &dma_data->ph, 1);
-	if (IS_ERR_VALUE(rc1))
+	if (rc1 < 0)
 		dev_err(&sdev->dev,
 			"%s: dealloc mgrport failed, err = %d\n",
 			__func__, rc1);
@@ -596,8 +595,7 @@
 
 	rc = snd_soc_register_component(&sdev->dev, &msm_dai_slim_component,
 					msm_slim_dais, NUM_SLIM_DAIS);
-
-	if (IS_ERR_VALUE(rc)) {
+	if (rc < 0) {
 		dev_err(dev, "%s: failed to register DAI, err = %d\n",
 			__func__, rc);
 		goto err_reg_comp;
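
msm-dai-slim.c keeps a strict ordering: allocate the manager ports, configure them, connect every channel as a sink, then activate the channel group, deallocating the ports on any failure. The stand-in helpers below mirror that ordering without asserting the slim_* signatures:

    static int demo_alloc_mgrports(void)    { return 0; }  /* slim_alloc_mgrports() */
    static int demo_config_mgrports(void)   { return 0; }  /* slim_config_mgrports() */
    static int demo_connect_sink(int ch)    { return 0; }  /* slim_connect_sink() */
    static int demo_activate_group(void)    { return 0; }  /* slim_control_ch() */
    static void demo_dealloc_mgrports(void) { }            /* slim_dealloc_mgrports() */

    static int demo_slim_bringup(int ch_cnt)
    {
        int i, rc;

        rc = demo_alloc_mgrports();
        if (rc < 0)
            return rc;
        rc = demo_config_mgrports();
        if (rc < 0)
            goto dealloc;
        for (i = 0; i < ch_cnt; i++) {
            rc = demo_connect_sink(i);
            if (rc < 0)
                goto dealloc;
        }
        rc = demo_activate_group();
        if (rc < 0)
            goto dealloc;
        return 0;

    dealloc:
        demo_dealloc_mgrports();
        return rc;
    }
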
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-routing-devdep.c b/sound/soc/msm/qdsp6v2/msm-pcm-routing-devdep.c
index 2e86be0..1dfbd7a 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-routing-devdep.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-routing-devdep.c
@@ -157,7 +157,7 @@
 			__func__, msm_bedais[dai_link->id].name);
 		return rc;
 	}
-	if (IS_ERR_VALUE(rc)) {
+	if (rc < 0) {
 		pr_err("%s: hwdep intf failed to create %s rc %d\n", __func__,
 			msm_bedais[dai_link->id].name, rc);
 		return rc;
diff --git a/sound/soc/msm/qdsp6v2/q6core.c b/sound/soc/msm/qdsp6v2/q6core.c
index ac3dd0e..f544393 100644
--- a/sound/soc/msm/qdsp6v2/q6core.c
+++ b/sound/soc/msm/qdsp6v2/q6core.c
@@ -19,7 +19,6 @@
 #include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/qdsp6v2/apr.h>
-#include <soc/qcom/smd.h>
 #include <sound/q6core.h>
 #include <sound/audio_cal_utils.h>
 
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 8ec1e3c..e6a67cd 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -911,7 +911,17 @@
 static const struct snd_soc_dai_ops null_dai_ops = {
 };
 
-static struct snd_soc_component *soc_find_component(
+/**
+ * soc_find_component - find a component in the ASoC core component_list
+ *
+ * @of_node: of_node of the component to look up.
+ * @name: name of the component to look up.
+ *
+ * Checks whether a component is already registered with the ASoC core.
+ *
+ * Return: the component handle on success, NULL if no match is found.
+ */
+struct snd_soc_component *soc_find_component(
 	const struct device_node *of_node, const char *name)
 {
 	struct snd_soc_component *component;
@@ -935,6 +945,7 @@
 
 	return NULL;
 }
+EXPORT_SYMBOL(soc_find_component);
 
 /**
  * snd_soc_find_dai - Find a registered DAI
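
With soc_find_component() exported, machine drivers in this tree can defer probing until their codec component registers. A hedged usage sketch, assuming the matching declaration is exposed via a header; the component name is made up:

    #include <linux/errno.h>
    #include <sound/soc.h>

    static int demo_check_codec_ready(void)
    {
        /* NULL of_node: match by name only */
        if (!soc_find_component(NULL, "tavil_codec"))
            return -EPROBE_DEFER;   /* not registered yet, retry later */
        return 0;
    }
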